# hacked from transformers.modeling_outputs. Only for custom output like gate_values

from dataclasses import dataclass
from typing import Optional, Tuple

import torch

from transformers.utils import ModelOutput


@dataclass
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing
            through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
            the classification token after processing through a linear layer and a tanh activation function. The linear
            layer weights are trained from the next sentence prediction (classification) objective during pretraining.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and, optionally if
            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and, optionally if
            `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
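

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the upstream class): the header
# comment says this local copy exists so that custom tensors such as gate
# values can travel alongside the standard fields. One way to do that is to
# subclass the dataclass above and add the extra field. The subclass name
# `BaseModelOutputWithPoolingAndGates`, the `gate_values` layout, and the
# tensor shapes below are assumptions for demonstration only.
if __name__ == "__main__":

    @dataclass
    class BaseModelOutputWithPoolingAndGates(BaseModelOutputWithPoolingAndCrossAttentions):
        # Extra, model-specific output, e.g. one gating tensor per gated layer (assumed).
        gate_values: Optional[Tuple[torch.FloatTensor]] = None

    outputs = BaseModelOutputWithPoolingAndGates(
        last_hidden_state=torch.zeros(2, 16, 768),  # (batch_size, sequence_length, hidden_size)
        pooler_output=torch.zeros(2, 768),          # (batch_size, hidden_size)
        gate_values=(torch.zeros(2, 16, 1),),       # purely illustrative shape
    )
    # ModelOutput supports both attribute and key access; fields left as None
    # are simply omitted when the output is converted to a tuple.
    print(outputs.gate_values[0].shape)
    print(outputs["last_hidden_state"].shape)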