# Copyright 2022 Cerebras Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/transformer.py
"""
from typing import Optional

import torch.nn as nn
from torch import Tensor

from modelzoo.common.pytorch.layers.utils import _get_clones
from modelzoo.common.pytorch.model_utils.RotaryPositionEmbeddingHelper import (
    RotaryPositionEmbeddingHelper,
)


class TransformerEncoder(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).
        enable_nested_tensor: must be left at its default of ``False``; automatic
            conversion of the input to a nested tensor is not supported by this
            implementation.

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """

    def __init__(
        self, encoder_layer, num_layers, norm=None, enable_nested_tensor=False
    ):
        super(TransformerEncoder, self).__init__()
        assert not enable_nested_tensor, "Nested tensors are not supported."
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

        # Re-initialize all layers to get a new set of weights for each layer
        self.__reset_parameters()

    def reset_parameters(self):
        self.__reset_parameters()

    def __reset_parameters(self):
        for layer in self.layers:
            layer.reset_parameters()
        if self.norm:
            # Reset the final layer norm (if any) to bias 0 and weight 1.
            if hasattr(self.norm, 'bias') and hasattr(self.norm.bias, "data"):
                self.norm.bias.data.zero_()
            if hasattr(self.norm, 'weight') and hasattr(
                self.norm.weight, "data"
            ):
                self.norm.weight.data.fill_(1.0)

    def forward(
        self,
        src: Tensor,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        rotary_position_embedding_helper: Optional[
            RotaryPositionEmbeddingHelper
        ] = None,
        self_attn_position_bias: Optional[Tensor] = None,
        **extra_args,
    ) -> Tensor:
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
            rotary_position_embedding_helper (Optional[RotaryPositionEmbeddingHelper]):
                A helper class to apply rotary embedding on the input tensor.
            self_attn_position_bias: the tensor containing the position bias to apply
                in self-attention; can be obtained from relative or ALiBi position
                embeddings.

        Shape:
            see the docs in the Transformer class.
        """
        output = src

        for mod in self.layers:
            output = mod(
                output,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
                rotary_position_embedding_helper=rotary_position_embedding_helper,
                self_attn_position_bias=self_attn_position_bias,
                **extra_args,
            )

        if self.norm is not None:
            output = self.norm(output)

        return output
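

if __name__ == "__main__":
    # Minimal usage sketch, for illustration only. It assumes the companion
    # TransformerEncoderLayer in this package accepts the PyTorch-style
    # (d_model, nhead, ...) constructor arguments shown in the class docstring
    # above; adjust the import path and arguments if the actual signature
    # differs.
    import torch

    from modelzoo.common.pytorch.layers.TransformerEncoderLayer import (
        TransformerEncoderLayer,
    )

    encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)

    # Stack six independently re-initialized copies of the layer and apply a
    # final layer norm to the output of the last one.
    transformer_encoder = TransformerEncoder(
        encoder_layer, num_layers=6, norm=nn.LayerNorm(512)
    )

    # Random input following the docstring example; whether the first two
    # dimensions are (seq_len, batch) or (batch, seq_len) depends on the
    # layer's batch_first setting. `mask`, `src_key_padding_mask`, and the
    # optional position-embedding arguments are forwarded unchanged to every
    # encoder layer.
    src = torch.rand(10, 32, 512)
    out = transformer_encoder(src)
    print(out.shape)  # same shape as src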