"""PyTorch BERT model."""

import importlib
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.file_utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"

BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
]


def get_cls_by_name(name: str) -> type:
    """Get a class by its name and module path.

    Args:
        name (str): e.g., transformers:T5ForConditionalGeneration, modeling_t5:my_class

    Returns:
        type: the class found for `name`
    """
    module_name, cls_name = name.split(':')
    return getattr(importlib.import_module(module_name), cls_name)
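

# Illustrative usage (not part of the original module): any importable "module:attr" pair
# resolves through `get_cls_by_name`, e.g.
#   get_cls_by_name('collections:OrderedDict')   # -> <class 'collections.OrderedDict'>
#   get_cls_by_name('transformers:BertConfig')   # -> the BertConfig class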


def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load TF checkpoints in a PyTorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from the TF checkpoint
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # Skip optimizer-only variables (Adam moments, global step); they are not needed
        # to use the pretrained model.
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
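

# Sketch of a typical conversion call (hypothetical paths), assuming a BERT model class built from
# the matching config is available elsewhere in this module:
#   config = BertConfig.from_json_file('uncased_L-12_H-768_A-12/bert_config.json')
#   model = load_tf_weights_in_bert(BertModel(config), config, 'uncased_L-12_H-768_A-12/bert_model.ckpt')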


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        if config.position_embedding_type == 'absolute':
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with the TensorFlow variable name, so that
        # TensorFlow checkpoints can still be loaded.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, max_position_embeddings) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            self.register_buffer(
                "token_type_ids",
                torch.zeros(self.position_ids.size(), dtype=torch.long),
                persistent=False,
            )

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length: seq_length + past_key_values_length]

        # When token_type_ids are not provided, fall back to the all-zeros buffer registered in the
        # constructor; this helps, e.g., when tracing the model without passing token_type_ids.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
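

# Shape sketch (illustrative): for input_ids of shape (batch_size, seq_len), BertEmbeddings
# returns a float tensor of shape (batch_size, seq_len, config.hidden_size) after summing
# word, token_type and (for `absolute`) position embeddings, then LayerNorm and dropout.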


class BertSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None, has_relative_attention_bias=False):
        """BERT self-attention with absolute/relative position encodings and optional sparsity.

        Args:
            config: HF model configuration loaded from json
            position_embedding_type (str, optional): absolute, relative_key, relative_key_query,
                relative_attention_bias or rotary. Defaults to None.
            has_relative_attention_bias (bool, optional): use its own relative embeddings matrix. Defaults to False.
        """
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.config = config
        self.is_decoder = config.is_decoder
        # maximum sequence length (used by the sparse attention kernel)
        self.max_seq_len = config.max_position_embeddings

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # optional sparse attention (DeepSpeed sparse ops), enabled via config.sparse_config_cls
        self.is_sparse = False
        sparse_config_cls_name = getattr(config, 'sparse_config_cls', None)
        if sparse_config_cls_name:
            self.is_sparse = True
            sparse_config_cls = get_cls_by_name(sparse_config_cls_name)
            self.sparse_config = sparse_config_cls(**self.config.sparse_attention)

        if self.is_decoder and self.is_sparse:
            raise RuntimeError('SparseAttention with BertModel decoder is not currently supported!')

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)
        self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute")
        self.has_relative_attention_bias = has_relative_attention_bias

        if self.is_sparse and self.position_embedding_type not in ['absolute', 'relative_attention_bias', 'rotary']:
            raise RuntimeError(f'SparseAttention supports `absolute`, `relative_attention_bias` and `rotary` position '
                               f'embeddings, but: position_embedding_type = {self.position_embedding_type}')

        if self.is_decoder and self.position_embedding_type == 'relative_attention_bias':
            raise RuntimeError(f'BertSelfAttention does not support `relative_attention_bias` with `is_decoder` '
                               f'= {self.is_decoder}')

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.max_seq_len = 2 * config.max_position_embeddings
            # relative distances span [-(max_pos - 1), max_pos - 1] -> 2 * max_pos - 1 embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        elif self.position_embedding_type == 'relative_attention_bias' and self.has_relative_attention_bias:
            self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
            self.relative_last_bucket_distance = self.config.relative_last_bucket_distance
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.num_attention_heads)
        elif self.position_embedding_type == 'rotary':
            # RotaryEmbedding / apply_rotary_pos_emb are expected to be provided elsewhere in this module
            self.rotary_base = getattr(config, 'rotary_base', None)
            self.rotary_dim = getattr(config, 'rotary_dim', self.attention_head_size)
            self.rotary_emb = RotaryEmbedding(self.rotary_dim, base=self.rotary_base)

        if self.is_sparse:
            try:
                from deepspeed.ops.sparse_attention import SparseSelfAttention
            except ImportError as e:
                logger.error(f'DeepSpeed is required for Sparse Ops: {e}')
                raise
            self.sparse_self_attention = SparseSelfAttention(self.sparse_config, max_seq_length=self.max_seq_len)
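
    # Hypothetical config sketch (illustrative, not from the original file) for enabling the
    # DeepSpeed sparse path read by __init__ above:
    #   config.sparse_config_cls = 'deepspeed.ops.sparse_attention:FixedSparsityConfig'
    #   config.sparse_attention = {'num_heads': 12, 'block': 16, 'num_local_blocks': 4}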

    def transpose_for_scores(self, x):
        # (bs, seq_len, all_head_size) -> (bs, num_heads, seq_len, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def transpose_key_for_scores(self, x):
        # (bs, seq_len, all_head_size) -> (bs, num_heads, head_size, seq_len), ready for Q @ K
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 3, 1)

    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

        TODO: refactor, the same code is used in modeling_t5.

        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on.

        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer

        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)

        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact

        # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )

        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets
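
    # Worked example for _relative_position_bucket (illustrative): with bidirectional=True,
    # num_buckets=32 and max_distance=128, the 32 buckets split into 16 for "key before query"
    # and 16 for "key after query"; e.g. relative_position = -5 -> bucket 5 (exact region),
    # +3 -> bucket 19, and +200 -> bucket 31 (clamped to the last log-spaced bucket).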

    def compute_bias(self, query_length, key_length):
        """Compute binned relative position bias."""
        context_position = torch.arange(query_length, dtype=torch.long)[:, None]
        memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
        relative_position = memory_position - context_position  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # shape (query_length, key_length)
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_last_bucket_distance,
        )
        relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
        values = self.relative_attention_bias(relative_position_bucket)  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
        return values

    def get_relative_attention_bias(self, position_bias, batch_size, query_length, key_length):
        if position_bias is None and self.has_relative_attention_bias:
            position_bias = self.compute_bias(query_length, key_length)
            position_bias = position_bias.repeat(batch_size, 1, 1, 1)
        return position_bias

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        position_bias=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys and values come from an
        # encoder; the attention mask needs to be such that the encoder's padding tokens are not
        # attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse cached cross-attention keys and values
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_key_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_key_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            # keys are stored as (bs, num_heads, head_size, seq_len), so cached keys are concatenated
            # along the last (sequence) dimension; values are (bs, num_heads, seq_len, head_size)
            key_layer = torch.cat([past_key_value[0], key_layer], dim=-1)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_key_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            # if decoder, cache the key/value states for the next generation step
            past_key_value = (key_layer, value_layer)

        bs, seq_len, _ = hidden_states.shape

        if self.position_embedding_type == 'rotary':
            # rotary embeddings are applied to queries and keys before the attention scores are computed
            if past_key_value is not None:
                raise RuntimeError(f'past_key_value is not supported in BertSelfAttention.forward with '
                                   f'position_embedding_type = {self.position_embedding_type}.')
            # bring keys back to (bs, num_heads, seq_len, head_size) to apply rotary embeddings
            key_layer = key_layer.transpose(-1, -2)
            if self.rotary_dim < self.attention_head_size:
                query_rot = query_layer[..., :self.rotary_dim]
                query_pass = query_layer[..., self.rotary_dim:]

                key_rot = key_layer[..., :self.rotary_dim]
                key_pass = key_layer[..., self.rotary_dim:]
            else:
                query_rot = query_layer
                key_rot = key_layer

            cos, sin = self.rotary_emb(key_rot, seq_len=seq_len)
            query_layer, key_layer = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, offset=0)
            if self.rotary_dim < self.attention_head_size:
                query_layer = torch.cat((query_layer, query_pass), dim=-1)
                key_layer = torch.cat((key_layer, key_pass), dim=-1)
            # transpose keys back to (bs, num_heads, head_size, seq_len) for the dot product
            key_layer = key_layer.transpose(-1, -2)

        if not self.is_sparse:
            # take the dot product between "query" and "key" to get the raw attention scores
            attention_scores = torch.matmul(query_layer, key_layer)

            if self.position_embedding_type in ["relative_key", "relative_key_query"]:
                position_ids_l = torch.arange(seq_len, dtype=torch.long, device=hidden_states.device).view(-1, 1)
                position_ids_r = torch.arange(seq_len, dtype=torch.long, device=hidden_states.device).view(1, -1)
                distance = position_ids_l - position_ids_r
                positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
                positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

                if self.position_embedding_type == "relative_key":
                    relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                    attention_scores = attention_scores + relative_position_scores
                elif self.position_embedding_type == "relative_key_query":
                    relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                    relative_position_scores_key = torch.einsum("bhdr,lrd->bhlr", key_layer, positional_embedding)
                    attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
            elif self.position_embedding_type == 'relative_attention_bias':
                # T5-style relative attention bias is added directly to the attention scores
                position_bias = self.get_relative_attention_bias(position_bias, bs, seq_len, seq_len)
                attention_scores = attention_scores + position_bias

            attention_scores = attention_scores / math.sqrt(self.attention_head_size)
            if attention_mask is not None:
                # apply the attention mask (precomputed for all layers in the model's forward() function)
                attention_scores = attention_scores + attention_mask

            # normalize the attention scores to probabilities
            attention_probs = self.softmax(attention_scores)

            # this is actually dropping out entire tokens to attend to, which might seem a bit
            # unusual, but it is taken from the original Transformer paper
            attention_probs = self.dropout(attention_probs)

            # mask heads if needed
            if head_mask is not None:
                attention_probs = attention_probs * head_mask

            context_layer = torch.matmul(attention_probs, value_layer)
        else:
            # sparse attention path (DeepSpeed sparse ops); attention probabilities are computed
            # inside SparseSelfAttention and are not materialized here
            if self.position_embedding_type == 'relative_attention_bias':
                position_bias = self.get_relative_attention_bias(position_bias, bs, seq_len, seq_len)

            query_dtype = query_layer.dtype
            if query_dtype != torch.half:
                # DeepSpeed sparse attention kernels run in half precision
                query_layer, key_layer, value_layer = query_layer.half(), key_layer.half(), value_layer.half()
                if position_bias is not None:
                    position_bias = position_bias.half()
            context_layer = self.sparse_self_attention(query_layer, key_layer, value_layer, rpe=position_bias,
                                                       key_padding_mask=attention_mask)
            if query_dtype == torch.float:
                context_layer = context_layer.float()

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        if self.is_sparse and output_attentions:
            # attention probabilities are not available from the sparse kernel
            raise RuntimeError(f'SparseAttention does not support output_attentions = {output_attentions}')

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.position_embedding_type == 'relative_attention_bias':
            outputs = outputs + (position_bias,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
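

# Minimal usage sketch (illustrative, not from the original file): with the default `absolute`
# position embeddings and no sparsity, the dense path above is used.
#   cfg = BertConfig(hidden_size=768, num_attention_heads=12)
#   attn = BertSelfAttention(cfg)
#   out = attn(torch.randn(2, 16, 768))
#   out[0].shape  # -> torch.Size([2, 16, 768]), the context layer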


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pre_layer_norm = getattr(config, 'pre_layer_norm', False)
        self.bert_output_layer = True
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if not self.pre_layer_norm:
            self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if not self.pre_layer_norm:
            hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None, has_relative_attention_bias=False):
        super().__init__()
        self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type,
                                      has_relative_attention_bias=has_relative_attention_bias)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        position_bias=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            position_bias,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions/bias/cache if returned
        return outputs
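

# Head-pruning sketch (illustrative): calling `attention.prune_heads({0, 2})` on a BertAttention
# instance removes heads 0 and 2 from the query/key/value projections and the output dense layer,
# and updates num_attention_heads / all_head_size accordingly.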


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class BertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pre_layer_norm = getattr(config, 'pre_layer_norm', False)
        # marker attribute used by BertPreTrainedModel._init_weights to scale the init std of this layer
        self.bert_output_layer = True
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if not self.pre_layer_norm:
            self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if not self.pre_layer_norm:
            hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


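# NOTE: illustrative sketch, not part of the original modeling code. The `pre_layer_norm` flag
# used by `BertOutput` above and `BertLayer` below switches between the standard post-LN BERT
# residual (LayerNorm applied after the residual add) and a pre-LN variant (input normalized
# before the sub-layer, residual added outside). The helper names are hypothetical.
def _post_ln_sublayer(x, sublayer, layer_norm):
    # post-LN: y = LayerNorm(x + sublayer(x)), as in BertOutput.forward when pre_layer_norm=False
    return layer_norm(x + sublayer(x))


def _pre_ln_sublayer(x, sublayer, layer_norm):
    # pre-LN: y = x + sublayer(LayerNorm(x)), mirroring pre_attention_ln / post_attention_ln usage below
    return x + sublayer(layer_norm(x))

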
class BertLayer(nn.Module):
    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.pre_layer_norm = getattr(config, 'pre_layer_norm', False)
        if self.pre_layer_norm:
            self.pre_attention_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.post_attention_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = BertAttention(config, has_relative_attention_bias=has_relative_attention_bias)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = BertAttention(config, position_embedding_type="absolute")
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        position_bias=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values are at positions 1, 2 of past_key_value
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states if not self.pre_layer_norm else self.pre_attention_ln(hidden_states),
            attention_mask,
            head_mask,
            position_bias=position_bias,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is the self-attention cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )

            # cross-attention cached key/values are at positions 3, 4 of the past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                position_bias,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # append the cross-attention cache to the present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        if self.pre_layer_norm:
            # pre-LN: the residual connection around the attention block is applied here
            attention_output = hidden_states + attention_output

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )

        outputs = (layer_output,) + outputs

        # if decoder, return the attention key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_inp = attention_output if not self.pre_layer_norm else self.post_attention_ln(attention_output)
        intermediate_output = self.intermediate(intermediate_inp)
        layer_output = self.output(intermediate_output, attention_output)
        if self.pre_layer_norm:
            # pre-LN: residual connection around the feed-forward block
            layer_output = layer_output + attention_output
        return layer_output


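# NOTE: illustrative sketch, not part of the original modeling code. `apply_chunking_to_forward`
# (imported at the top of this file) runs `feed_forward_chunk` over slices of the sequence
# dimension (`seq_len_dim = 1`) of size `config.chunk_size_feed_forward` and concatenates the
# results, trading compute for peak memory. The loop below is an equivalent formulation; the
# helper name is hypothetical.
def _chunked_feed_forward_equivalent(feed_forward_chunk, chunk_size, attention_output):
    if chunk_size == 0:
        # chunking disabled: apply the feed-forward block to the whole sequence at once
        return feed_forward_chunk(attention_output)
    chunks = attention_output.split(chunk_size, dim=1)  # slices along the sequence dimension
    return torch.cat([feed_forward_chunk(chunk) for chunk in chunks], dim=1)

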
class BertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pre_layer_norm = getattr(config, 'pre_layer_norm', False)
        # with pre_layer_norm, an extra LayerNorm can be applied to the output of the last layer
        self.last_layer_norm = getattr(config, 'last_layer_norm', self.pre_layer_norm)
        if not self.pre_layer_norm and self.last_layer_norm:
            raise RuntimeError('last_layer_norm could be used only with pre_layer_norm=True')
        self.layer = nn.ModuleList(
            [BertLayer(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
        if self.pre_layer_norm and self.last_layer_norm:
            self.last_layer_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        position_bias = None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, position_bias, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    position_bias,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

            if self.config.position_embedding_type == 'relative_attention_bias':
                # reuse the relative position bias produced by the first layer in all later layers
                if not output_attentions:
                    position_bias = layer_outputs[1]
                else:
                    position_bias = layer_outputs[2]

        if self.pre_layer_norm and self.last_layer_norm:
            hidden_states = self.last_layer_ln(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


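# NOTE: illustrative sketch, not part of the original modeling code. With
# `config.position_embedding_type == 'relative_attention_bias'`, only layer 0 owns the
# relative-position embedding (`has_relative_attention_bias=True` above); the bias tensor it
# emits is picked out of its outputs and fed to every subsequent layer, similar to T5's shared
# relative attention bias. The helper name is hypothetical; mask/head-mask arguments are omitted.
def _share_position_bias_across_layers(layers, hidden_states, output_attentions=False):
    position_bias = None
    for layer_module in layers:
        layer_outputs = layer_module(
            hidden_states, position_bias=position_bias, output_attentions=output_attentions
        )
        hidden_states = layer_outputs[0]
        # same index convention as BertEncoder.forward above: the bias follows the optional
        # attention probabilities in the layer's output tuple
        position_bias = layer_outputs[2] if output_attentions else layer_outputs[1]
    return hidden_states

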
class BertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


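# NOTE: illustrative sketch, not part of the original modeling code. `BertPooler` turns the
# `(batch, seq_len, hidden)` encoder output into a single `(batch, hidden)` vector by passing
# the first ([CLS]) token through a dense + tanh. The config values below are hypothetical.
def _example_pooler_usage():
    config = BertConfig(hidden_size=32)       # deliberately tiny, illustrative values
    pooler = BertPooler(config)
    sequence_output = torch.randn(2, 8, 32)   # (batch_size=2, seq_len=8, hidden_size=32)
    pooled_output = pooler(sequence_output)   # -> shape (2, 32)
    return pooled_output.shape

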
class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


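# NOTE: illustrative sketch, not part of the original modeling code. In
# `BertLMPredictionHead.__init__` the assignment `self.decoder.bias = self.bias` makes the
# head's standalone bias and the decoder's bias the very same Parameter, so resizing the token
# embeddings keeps them in sync. The config values below are hypothetical.
def _example_prediction_head_bias_link():
    config = BertConfig(hidden_size=32, vocab_size=100)
    head = BertLMPredictionHead(config)
    assert head.decoder.bias is head.bias          # one shared Parameter object
    logits = head(torch.randn(2, 8, 32))           # -> shape (2, 8, 100), one score per vocab token
    return logits.shape

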
class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            std = self.config.initializer_range
            if hasattr(module, 'bert_output_layer') and self.config.pre_layer_norm:
                # scale down the init std of output-projection layers to stabilize deep pre-LN stacks
                std /= math.sqrt(2.0 * self.config.num_hidden_layers)
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BertEncoder):
            module.gradient_checkpointing = value


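# NOTE: illustrative sketch, not part of the original modeling code. Under `pre_layer_norm=True`,
# `_init_weights` above shrinks the init std of output-projection layers (modules tagged with
# `bert_output_layer = True`) by sqrt(2 * num_hidden_layers), a common stabilization for deep
# pre-LN stacks. The numbers below are hypothetical examples of that arithmetic.
def _example_scaled_init_std(initializer_range=0.02, num_hidden_layers=12):
    scaled_std = initializer_range / math.sqrt(2.0 * num_hidden_layers)
    return scaled_std                              # 0.02 / sqrt(24) ~= 0.0041

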
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`BertForPreTraining`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        mlm_loss (`torch.FloatTensor`, *optional*):
            Masked language modeling loss.
        nsp_loss (`torch.FloatTensor`, *optional*):
            Next sequence prediction (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    mlm_loss: Optional[torch.FloatTensor] = None
    nsp_loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


BERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`BertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


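# NOTE: illustrative sketch, not part of the original modeling code. It builds the tensors
# described in BERT_INPUTS_DOCSTRING by hand (in practice a tokenizer produces them); the
# shapes, vocabulary size and padding positions are hypothetical.
def _example_model_inputs(batch_size=2, seq_length=8, vocab_size=100):
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))        # token indices
    attention_mask = torch.ones(batch_size, seq_length, dtype=torch.long)     # 1 = attend, 0 = padding
    attention_mask[:, -2:] = 0                                                # pretend the last two positions are padding
    token_type_ids = torch.zeros(batch_size, seq_length, dtype=torch.long)    # single-segment (sentence A) input
    return {"input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids}

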
						@add_start_docstrings( | 
					
					
						
						| 
							 | 
						    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", | 
					
					
						
						| 
							 | 
						    BERT_START_DOCSTRING, | 
					
					
						
						| 
							 | 
						) | 
					
					
						
						| 
							 | 
						class BertModel(BertPreTrainedModel): | 
					
					
						
						| 
							 | 
						    """ | 
					
					
						
						| 
							 | 
						 | 
					
					
						
						| 
							 | 
						    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of | 
					
					
						
						| 
							 | 
						    cross-attention is added between the self-attention layers, following the architecture described in [Attention is | 
					
					
						
						| 
							 | 
						    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, | 
					
					
						
						| 
							 | 
						    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. | 
					
					
						
						| 
							 | 
						 | 
					
					
						
						| 
							 | 
						    To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set | 
					
					
						
						| 
							 | 
						    to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and | 
					
					
						
						| 
							 | 
						    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. | 
					
					
						
						| 
							 | 
						    """ | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						    def __init__(self, config, add_pooling_layer=True): | 
					
					
						
						| 
							 | 
						        super().__init__(config) | 
					
					
						
						| 
							 | 
						        self.config = config | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        if hasattr(config, 'sparse_attention'): | 
					
					
						
						| 
							 | 
						            self.is_sparse = True | 
					
					
						
						| 
							 | 
						            self.sparse_block_size = self.config.sparse_attention['block'] | 
					
					
						
						| 
							 | 
						        else: | 
					
					
						
						| 
							 | 
						            self.is_sparse = False | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        if self.is_sparse and self.config.is_decoder: | 
					
					
						
						| 
							 | 
						            raise RuntimeError('SparseAttention with BertModel decoder is not currently supported!') | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        self.embeddings = BertEmbeddings(config) | 
					
					
						
						| 
							 | 
						        self.encoder = BertEncoder(config) | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        self.pooler = BertPooler(config) if add_pooling_layer else None | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						         | 
					
					
						
						| 
							 | 
						        self.post_init() | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						    def get_input_embeddings(self): | 
					
					
						
						| 
							 | 
						        return self.embeddings.word_embeddings | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						    def set_input_embeddings(self, value): | 
					
					
						
						| 
							 | 
						        self.embeddings.word_embeddings = value | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						    def _prune_heads(self, heads_to_prune): | 
					
					
						
						| 
							 | 
						        """ | 
					
					
						
						| 
							 | 
						        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base | 
					
					
						
						| 
							 | 
						        class PreTrainedModel | 
					
					
						
						| 
							 | 
						        """ | 
					
					
						
						| 
							 | 
						        for layer, heads in heads_to_prune.items(): | 
					
					
						
						| 
							 | 
						            self.encoder.layer[layer].attention.prune_heads(heads) | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) | 
					
					
						
						| 
							 | 
						    @add_code_sample_docstrings( | 
					
					
						
						| 
							 | 
						        processor_class=_TOKENIZER_FOR_DOC, | 
					
					
						
						| 
							 | 
						        checkpoint=_CHECKPOINT_FOR_DOC, | 
					
					
						
						| 
							 | 
						        output_type=BaseModelOutputWithPoolingAndCrossAttentions, | 
					
					
						
						| 
							 | 
						        config_class=_CONFIG_FOR_DOC, | 
					
					
						
						| 
							 | 
						    ) | 
					
					
						
						| 
							 | 
						    def forward( | 
					
					
						
						| 
							 | 
						        self, | 
					
					
						
						| 
							 | 
						        input_ids=None, | 
					
					
						
						| 
							 | 
						        attention_mask=None, | 
					
					
						
						| 
							 | 
						        token_type_ids=None, | 
					
					
						
						| 
							 | 
						        position_ids=None, | 
					
					
						
						| 
							 | 
						        head_mask=None, | 
					
					
						
						| 
							 | 
						        inputs_embeds=None, | 
					
					
						
						| 
							 | 
						        encoder_hidden_states=None, | 
					
					
						
						| 
							 | 
						        encoder_attention_mask=None, | 
					
					
						
						| 
							 | 
						        past_key_values=None, | 
					
					
						
						| 
							 | 
						        use_cache=None, | 
					
					
						
						| 
							 | 
						        output_attentions=None, | 
					
					
						
						| 
							 | 
						        output_hidden_states=None, | 
					
					
						
						| 
							 | 
						        return_dict=None, | 
					
					
						
						| 
							 | 
						    ): | 
					
					
						
						| 
							 | 
						        r""" | 
					
					
						
						| 
							 | 
						        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): | 
					
					
						
						| 
							 | 
						            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if | 
					
					
						
						| 
							 | 
						            the model is configured as a decoder. | 
					
					
						
						| 
							 | 
						        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): | 
					
					
						
						| 
							 | 
						            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in | 
					
					
						
						| 
							 | 
						            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: | 
					
					
						
						| 
							 | 
						 | 
					
					
						
						| 
							 | 
						            - 1 for tokens that are **not masked**, | 
					
					
						
						| 
							 | 
						            - 0 for tokens that are **masked**. | 
					
					
						
						| 
							 | 
						        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): | 
					
					
						
						| 
							 | 
						            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. | 
					
					
						
						| 
							 | 
						 | 
					
					
						
						| 
							 | 
						            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that | 
					
					
						
						| 
							 | 
						            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all | 
					
					
						
						| 
							 | 
						            `decoder_input_ids` of shape `(batch_size, sequence_length)`. | 
					
					
						
						| 
							 | 
						        use_cache (`bool`, *optional*): | 
					
					
						
						| 
							 | 
						            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see | 
					
					
						
						| 
							 | 
						            `past_key_values`). | 
					
					
						
						| 
							 | 
						        """ | 
					
					
						
						| 
							 | 
						        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions | 
					
					
						
						| 
							 | 
						        output_hidden_states = ( | 
					
					
						
						| 
							 | 
						            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states | 
					
					
						
						| 
							 | 
						        ) | 
					
					
						
						| 
							 | 
						        return_dict = return_dict if return_dict is not None else self.config.use_return_dict | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        if self.config.is_decoder: | 
					
					
						
						| 
							 | 
						            use_cache = use_cache if use_cache is not None else self.config.use_cache | 
					
					
						
						| 
							 | 
						        else: | 
					
					
						
						| 
							 | 
						            use_cache = False | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        if input_ids is not None and inputs_embeds is not None: | 
					
					
						
						| 
							 | 
						            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") | 
					
					
						
						| 
							 | 
						        elif input_ids is not None: | 
					
					
						
						| 
							 | 
						            input_shape = input_ids.size() | 
					
					
						
						| 
							 | 
						        elif inputs_embeds is not None: | 
					
					
						
						| 
							 | 
						            input_shape = inputs_embeds.size()[:-1] | 
					
					
						
						| 
							 | 
						        else: | 
					
					
						
						| 
							 | 
						            raise ValueError("You have to specify either input_ids or inputs_embeds") | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        batch_size, seq_length = input_shape | 
					
					
						
						| 
							 | 
						        device = input_ids.device if input_ids is not None else inputs_embeds.device | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						         | 
					
					
						
						| 
							 | 
						        if self.is_sparse and seq_length % self.sparse_block_size != 0: | 
					
					
						
            raise RuntimeError(f'BertModel with sparse attention is used, but seq_len = {seq_length} '
                               f'is not divisible by block_size = {self.sparse_block_size}')

        # length of the cached key/value states, if any
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # Make the 2D padding mask broadcastable to all attention heads:
        # [batch_size, seq_length] -> [batch_size, 1, 1, seq_length] (additive mask).
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If the model is used as a decoder with cross-attention, build (or invert) the
        # encoder attention mask so it is broadcastable over heads and query positions.
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare the head mask if needed: 1.0 in head_mask means the head is kept.
        # The input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and is expanded to one entry per layer.
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
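

# Illustrative usage sketch: when sparse attention is enabled, the forward pass above raises a
# RuntimeError unless the sequence length is a multiple of `sparse_block_size`. A minimal way to
# satisfy that constraint is to pad the tokenized inputs up to the next multiple of the block
# size before calling the model. The checkpoint name, the default block size, and the assumption
# that padded positions (attention_mask = 0) are acceptable to the sparse kernel are all
# illustration-only assumptions, not guarantees of this module.
def _example_pad_to_sparse_block_size(text: str, block_size: int = 32):
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    encoding = tokenizer([text])
    seq_len = len(encoding["input_ids"][0])
    # Round the length up to the next multiple of `block_size`; the tokenizer pads with the PAD
    # token and marks the padded positions with attention_mask = 0.
    padded_len = ((seq_len + block_size - 1) // block_size) * block_size
    batch = tokenizer.pad(encoding, padding="max_length", max_length=padded_len, return_tensors="pt")
    return batch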


@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
                config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
                pair (see `input_ids` docstring). Indices should be in `[0, 1]`:

                - 0 indicates sequence B is a continuation of sequence A,
                - 1 indicates sequence B is a random sequence.
            kwargs (`Dict[str, any]`, *optional*, defaults to *{}*):
                Used to hide legacy arguments that have been deprecated.

        Returns:

        Example:

        ```python
        >>> from transformers import BertTokenizer, BertForPreTraining
        >>> import torch

        >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        >>> model = BertForPreTraining.from_pretrained("bert-base-uncased")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        masked_lm_loss = None
        next_sentence_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BertForPreTrainingOutput(
            loss=total_loss,
            mlm_loss=masked_lm_loss,
            nsp_loss=next_sentence_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
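

# Illustrative usage sketch: a minimal training-style call that exercises both pretraining heads
# and reads back the split losses this variant exposes on `BertForPreTrainingOutput` (`mlm_loss`
# and `nsp_loss` in addition to the combined `loss`). The checkpoint name is an assumption, and
# masking a single token by hand stands in for a real masking data collator.
def _example_pretraining_losses():
    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    model = BertForPreTraining.from_pretrained("bert-base-uncased")  # assumed checkpoint

    inputs = tokenizer("The capital of France is Paris.", "It sits on the Seine.", return_tensors="pt")
    labels = torch.full_like(inputs["input_ids"], -100)  # -100 positions are ignored by the MLM loss
    labels[0, 5] = inputs["input_ids"][0, 5]  # remember the original token at one position
    inputs["input_ids"][0, 5] = tokenizer.mask_token_id  # then replace that position with [MASK]

    outputs = model(**inputs, labels=labels, next_sentence_label=torch.tensor([0]))  # 0 = B follows A
    return outputs.loss, outputs.mlm_loss, outputs.nsp_loss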


@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True`.")

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be
                in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked); the loss is only computed for the tokens with labels in `[0, ...,
                config.vocab_size]`.
            past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
        >>> import torch

        >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        >>> config = BertConfig.from_pretrained("bert-base-cased")
        >>> config.is_decoder = True
        >>> model = BertLMHeadModel.from_pretrained("bert-base-cased", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # next-token prediction: shift the scores left and the labels right by one position
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        # create an all-ones attention mask on the fly if none was provided
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # if past key values are used, only the last input id needs to be passed
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
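

# Illustrative usage sketch: a bare-bones greedy decoding loop that uses the `past_key_values`
# cache documented above — after the first step only the newly generated token is fed back in.
# The checkpoint name and step count are assumptions; in practice `model.generate(...)` wraps
# this logic (via `prepare_inputs_for_generation`).
def _example_greedy_decode_with_cache(prompt: str, steps: int = 5):
    import torch
    from transformers import BertConfig, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-cased")  # assumed checkpoint
    config = BertConfig.from_pretrained("bert-base-cased")
    config.is_decoder = True  # required so BertLMHeadModel applies a causal self-attention mask
    model = BertLMHeadModel.from_pretrained("bert-base-cased", config=config)
    model.eval()

    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    generated = input_ids
    past = None
    with torch.no_grad():
        for _ in range(steps):
            outputs = model(input_ids=input_ids, past_key_values=past, use_cache=True)
            past = outputs.past_key_values
            next_token = outputs.logits[:, -1, :].argmax(dim=-1, keepdim=True)
            generated = torch.cat([generated, next_token], dim=-1)
            input_ids = next_token  # only the new token; its history is already cached
    return tokenizer.decode(generated[0])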


@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # the -100 index is ignored (padding / unmasked tokens)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # append a dummy PAD token so there is always a position left to predict
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")

        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
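

# Illustrative usage sketch: the standard fill-mask pattern for `BertForMaskedLM` — replace a
# token with `[MASK]`, run a forward pass, and take the argmax over the vocabulary at the masked
# position. The checkpoint name is an assumption.
def _example_fill_mask(text: str = "The capital of France is [MASK]."):
    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    model = BertForMaskedLM.from_pretrained("bert-base-uncased")  # assumed checkpoint
    model.eval()

    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (batch, seq_len, vocab_size)

    # Locate the [MASK] positions and decode the most likely token at each one.
    mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)
    predicted_ids = logits[mask_positions].argmax(dim=-1)
    return tokenizer.decode(predicted_ids)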


@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top.""",
    BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        self.post_init()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import BertTokenizer, BertForNextSentencePrediction
        >>> import torch

        >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        >>> model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```
        """

        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
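

# Illustrative usage sketch: turning the two NSP logits into a probability that sentence B
# actually follows sentence A (index 0 is the "is next" class, as documented above). The
# checkpoint name is an assumption.
def _example_next_sentence_probability(sentence_a: str, sentence_b: str) -> float:
    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")  # assumed checkpoint
    model.eval()

    encoding = tokenizer(sentence_a, sentence_b, return_tensors="pt")
    with torch.no_grad():
        logits = model(**encoding).logits  # shape (1, 2)
    probs = torch.softmax(logits, dim=-1)
    return probs[0, 0].item()  # probability that B is a continuation of A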


@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.post_init()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        pos_weight=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        pos_weight (`torch.FloatTensor` of shape `(num_labels,)`, *optional*):
            Weight of positive examples passed to `BCEWithLogitsLoss` when the problem type is
            `"multi_label_classification"`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type from `num_labels` and the label dtype if it was not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss(pos_weight=pos_weight)
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
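

# Illustrative usage sketch: multi-label classification with the `pos_weight` argument this
# variant adds to `BertForSequenceClassification.forward`. Float multi-hot labels make the head
# infer `problem_type = "multi_label_classification"`, and `pos_weight` is forwarded to
# `BCEWithLogitsLoss` to up-weight rare positive labels. The checkpoint name, label layout, and
# weight values below are assumptions for illustration only.
def _example_multi_label_with_pos_weight():
    import torch
    from transformers import BertTokenizer

    num_labels = 3
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=num_labels)

    inputs = tokenizer(["great movie", "terrible service"], return_tensors="pt", padding=True)
    labels = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])  # float multi-hot targets
    pos_weight = torch.tensor([1.0, 2.0, 5.0])  # assumed per-label positive-class weights

    outputs = model(**inputs, labels=labels, pos_weight=pos_weight)
    return outputs.loss, outputs.logits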


@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.post_init()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
					
					
						
						| 
							 | 
						        logits = self.classifier(pooled_output) | 
					
					
						
						| 
							 | 
						        reshaped_logits = logits.view(-1, num_choices) | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        loss = None | 
					
					
						
						| 
							 | 
						        if labels is not None: | 
					
					
						
						| 
							 | 
						            loss_fct = CrossEntropyLoss() | 
					
					
						
						| 
							 | 
						            loss = loss_fct(reshaped_logits, labels) | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        if not return_dict: | 
					
					
						
						| 
							 | 
						            output = (reshaped_logits,) + outputs[2:] | 
					
					
						
						| 
							 | 
						            return ((loss,) + output) if loss is not None else output | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						        return MultipleChoiceModelOutput( | 
					
					
						
						| 
							 | 
						            loss=loss, | 
					
					
						
						| 
							 | 
						            logits=reshaped_logits, | 
					
					
						
						| 
							 | 
						            hidden_states=outputs.hidden_states, | 
					
					
						
						| 
							 | 
						            attentions=outputs.attentions, | 
					
					
						
						| 
							 | 
						        ) | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
							 | 
						
 | 
					
					
						
						| 
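

# Illustrative usage sketch for BertForMultipleChoice (not part of the original
# module; the tensor sizes and the default config below are assumptions and may
# need extra fields for this modified BERT implementation). The model scores each
# of `num_choices` candidate sequences with a single linear unit, and the
# cross-entropy loss then acts as a softmax over choices:
#
#     from transformers import BertConfig
#
#     config = BertConfig()                                          # assumed compatible with this implementation
#     model = BertForMultipleChoice(config)
#     input_ids = torch.randint(0, config.vocab_size, (2, 4, 32))    # (batch_size, num_choices, seq_len)
#     labels = torch.tensor([1, 3])                                  # index of the correct choice per example
#     out = model(input_ids=input_ids, labels=labels)
#     out.logits.shape                                               # -> (2, 4): one score per choice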


@add_start_docstrings(
    """
    Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        if getattr(self.config, "problem_type", None) is None:
            self.config.problem_type = "single_label_classification"
        self.bert = BertModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.post_init()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        labels_mask=None,
        pos_weight=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`
            when `config.problem_type == "single_label_classification"`. For `"multi_label_classification"`, pass a
            float multi-hot tensor of shape `(batch_size, sequence_length, config.num_labels)` instead.
        labels_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Only used for multi-label classification. Positions with value `0` (e.g. padding tokens) are excluded
            from the loss; the remaining per-token losses are averaged over the masked positions.
        pos_weight (`torch.Tensor` of shape `(config.num_labels,)`, *optional*):
            Only used for multi-label classification. Per-class positive-example weights forwarded to
            `BCEWithLogitsLoss`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                if labels_mask is None:
                    loss_fct = BCEWithLogitsLoss(pos_weight=pos_weight)
                    loss = loss_fct(logits, labels)
                else:
                    loss_fct = BCEWithLogitsLoss(reduction="none", pos_weight=pos_weight)
                    loss = loss_fct(logits, labels)
                    loss = loss * labels_mask.unsqueeze(-1)
                    loss = (
                        loss.sum() / labels_mask.sum()
                        if labels_mask.sum() != 0.0
                        else torch.tensor(0.0, device=logits.device)
                    )

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
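

# Illustrative sketch of the multi-label path in BertForTokenClassification above
# (not part of the original module; the shapes are assumptions). With
# `config.problem_type == "multi_label_classification"`, `labels` is a float
# multi-hot tensor and `labels_mask` removes padding tokens from the masked mean
# over the per-token BCEWithLogitsLoss terms:
#
#     logits      -> (batch_size, seq_len, num_labels)
#     labels      -> (batch_size, seq_len, num_labels), values in {0.0, 1.0}
#     labels_mask -> (batch_size, seq_len), 1 for real tokens, 0 for padding
#     pos_weight  -> (num_labels,), forwarded to BCEWithLogitsLoss
#
#     out = model(
#         input_ids=input_ids,
#         attention_mask=attention_mask,
#         labels=labels,
#         labels_mask=labels_mask,
#         pos_weight=pos_weight,
#     )
#     out.loss   # mean BCE over unmasked tokens, or 0.0 if labels_mask sums to zero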


@add_start_docstrings(
    """
    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.post_init()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If the positions come with an extra trailing dimension (e.g. from a multi-GPU gather), squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the model inputs are clamped to ignored_index and excluded from the loss
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
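

# Illustrative usage sketch for BertForQuestionAnswering (not part of the original
# module; the variable names are assumptions). Training supplies gold span
# boundaries; at inference the predicted span is usually taken from the argmax of
# the two logit vectors:
#
#     out = model(
#         input_ids=input_ids,
#         attention_mask=attention_mask,
#         start_positions=start_positions,   # (batch_size,)
#         end_positions=end_positions,       # (batch_size,)
#     )
#     out.start_logits.shape                 # -> (batch_size, sequence_length)
#     start = out.start_logits.argmax(dim=-1)
#     end = out.end_logits.argmax(dim=-1)    # answer tokens: input_ids[i, start[i]:end[i] + 1]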


class RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, base=10000):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.seq_len_cached = None
        self.cos_cached = None
        self.sin_cached = None

    def forward(self, x, seq_dim=1, seq_len=None):
        if seq_len is None:
            seq_len = x.shape[seq_dim]
        if seq_len != self.seq_len_cached:
            self.seq_len_cached = seq_len
            t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.cos_cached = emb.cos()[None, None, :, :]
            self.sin_cached = emb.sin()[None, None, :, :]
        return self.cos_cached, self.sin_cached


def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=x1.ndim - 1)


def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0):
    cos, sin = cos[:, :, offset: q.shape[2] + offset, :], sin[:, :, offset: q.shape[2] + offset, :]
    return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
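

# Illustrative shape walk-through for the rotary helpers above (not part of the
# original module; the sizes are arbitrary assumptions). RotaryEmbedding caches
# cos/sin tables of shape (1, 1, seq_len, dim); apply_rotary_pos_emb rotates the
# query/key channels pairwise so attention scores depend on relative positions:
#
#     rotary = RotaryEmbedding(dim=64)
#     q = torch.randn(2, 12, 128, 64)    # (batch, heads, seq_len, head_dim)
#     k = torch.randn(2, 12, 128, 64)
#     cos, sin = rotary(q, seq_dim=-2)   # tables built from q's sequence length
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
#     assert q_rot.shape == q.shape and k_rot.shape == k.shape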