The Architecture and Its Components
Transformer
The code below includes:
- An example of layer normalization (standardization)
- An example of residual connections
```python
# huggingface
# transformers
# https://www.bilibili.com/video/BV1At4y1W75x?spm_id_from=333.999.0.0
import copy
import math
from collections import namedtuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

Hypothesis = namedtuple('Hypothesis', ['value', 'score'])


def clones(module, n):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])


"""
Standardization of x (standardization makes x approximately follow a normal distribution).
"""


class LayerNorm(nn.Module):
    def __init__(self, feature, eps=1e-6):
        """
        :param feature: size of the self-attention input x
        :param eps: numerical-stability constant
        """
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(feature))
        self.b_2 = nn.Parameter(torch.zeros(feature))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


"""
Residual-connection example.
"""


class SublayerConnection(nn.Module):
    """
    This does more than the residual connection: it applies the residual and the LayerNorm together.
    """

    def __init__(self, size, dropout=0.1):
        super(SublayerConnection, self).__init__()
        # Step 1: LayerNorm, instantiated as a sub-module
        self.layer_norm = LayerNorm(size)
        # Step 2: dropout
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, sublayer):
        """
        :param x: the input of the self-attention layer
        :param sublayer: the self-attention layer itself
        :return:
        """
        return self.dropout(self.layer_norm(x + sublayer(x)))


class FeatEmbedding(nn.Module):
    def __init__(self, d_feat, d_model, dropout):
        super(FeatEmbedding, self).__init__()
        self.video_embeddings = nn.Sequential(
            LayerNorm(d_feat),
            nn.Dropout(dropout),
            nn.Linear(d_feat, d_model))

    def forward(self, x):
        return self.video_embeddings(x)


class TextEmbedding(nn.Module):
    def __init__(self, vocab_size, d_model):
        super(TextEmbedding, self).__init__()
        self.d_model = d_model
        self.embed = nn.Embedding(vocab_size, d_model)

    def forward(self, x):
        return self.embed(x) * math.sqrt(self.d_model)


class PositionalEncoding(nn.Module):
    def __init__(self, dim, dropout, max_len=5000):
        if dim % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "odd dim (got dim={:d})".format(dim))
        pe = torch.zeros(max_len, dim)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *
                              -(math.log(10000.0) / dim)))
        pe[:, 0::2] = torch.sin(position.float() * div_term)
        pe[:, 1::2] = torch.cos(position.float() * div_term)
        pe = pe.unsqueeze(1)
        super(PositionalEncoding, self).__init__()
        self.register_buffer('pe', pe)
        self.drop_out = nn.Dropout(p=dropout)
        self.dim = dim

    def forward(self, emb, step=None):
        emb = emb * math.sqrt(self.dim)
        if step is None:
            emb = emb + self.pe[:emb.size(0)]
        else:
            emb = emb + self.pe[step]
        emb = self.drop_out(emb)
        return emb


"""
Self-attention implementation example.
"""


def self_attention(query, key, value, dropout=None, mask=None):
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    # The mask is applied after QK^T and before the softmax
    if mask is not None:
        mask = mask.to(scores.device)  # move the mask onto the same device as the scores
        scores = scores.masked_fill(mask == 0, -1e9)
    self_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        self_attn = dropout(self_attn)
    return torch.matmul(self_attn, value), self_attn
```
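As a quick sanity check on the normalization and residual blocks above, a minimal sketch along the following lines can be used. It relies on the `LayerNorm` and `SublayerConnection` classes just defined; the tensor sizes and the identity "sublayer" are illustrative assumptions, not part of the original code:

```python
import torch

# Hypothetical smoke test: push a random [batch, seq_len, d_model] tensor through
# LayerNorm and SublayerConnection as defined above.
d_model = 512
x = torch.randn(2, 10, d_model)

norm = LayerNorm(d_model)
y = norm(x)
print(y.shape)                 # torch.Size([2, 10, 512])
print(y.mean(-1).abs().max())  # per-position mean is roughly zero after normalization

sublayer = SublayerConnection(d_model, dropout=0.0)
# An identity function stands in for the self-attention sublayer here.
z = sublayer(x, lambda t: t)
print(z.shape)                 # torch.Size([2, 10, 512])
```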
```python
"""
Multi-head attention implementation example.
"""


class MultiHeadAttention(nn.Module):
    def __init__(self, head, d_model, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        assert (d_model % head == 0)
        self.d_k = d_model // head
        self.head = head
        self.d_model = d_model
        self.linear_query = nn.Linear(d_model, d_model)
        self.linear_key = nn.Linear(d_model, d_model)
        self.linear_value = nn.Linear(d_model, d_model)
        self.linear_out = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(p=dropout)
        self.attn = None

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # The linear projections reshape query [batch, frame_num, d_model] into [batch, -1, head, d_k]
            # and then swap dims 1 and 2 to get [batch, head, -1, d_k], so the mask needs an extra
            # dimension at dim 1 to match the shapes used in the self-attention computation.
            mask = mask.unsqueeze(1)
        n_batch = query.size(0)

        # if self.head == 1:
        #     x, self.attn = self_attention(query, key, value, dropout=self.dropout, mask=mask)
        # else:
        #     query = self.linear_query(query).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)  # [b, 8, 32, 64]
        #     key = self.linear_key(key).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)        # [b, 8, 28, 64]
        #     value = self.linear_value(value).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)  # [b, 8, 28, 64]
        #
        #     x, self.attn = self_attention(query, key, value, dropout=self.dropout, mask=mask)
        #     # back to three dimensions, i.e. concat the heads
        #     x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.head * self.d_k)

        query = self.linear_query(query).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)  # [b, 8, 32, 64]
        key = self.linear_key(key).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)        # [b, 8, 28, 64]
        value = self.linear_value(value).view(n_batch, -1, self.head, self.d_k).transpose(1, 2)  # [b, 8, 28, 64]

        x, self.attn = self_attention(query, key, value, dropout=self.dropout, mask=mask)
        # back to three dimensions, i.e. concat the heads
        x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.head * self.d_k)

        return self.linear_out(x)


class PositionWiseFeedForward(nn.Module):
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionWiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        output = self.dropout_2(self.w_2(inter))
        return output


class EncoderLayer(nn.Module):
    def __init__(self, size, attn, feed_forward, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.attn = attn
        self.feed_forward = feed_forward
        self.sublayer_connection = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask):
        x = self.sublayer_connection[0](x, lambda x: self.attn(x, x, x, mask))
        return self.sublayer_connection[1](x, self.feed_forward)


class EncoderLayerNoAttention(nn.Module):
    def __init__(self, size, attn, feed_forward, dropout=0.1):
        super(EncoderLayerNoAttention, self).__init__()
        self.attn = attn
        self.feed_forward = feed_forward
        self.sublayer_connection = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask):
        return self.sublayer_connection[1](x, self.feed_forward)


class DecoderLayer(nn.Module):
    def __init__(self, size, attn, feed_forward, sublayer_num, dropout=0.1):
        super(DecoderLayer, self).__init__()
        self.attn = attn
        self.feed_forward = feed_forward
        self.sublayer_connection = clones(SublayerConnection(size, dropout), sublayer_num)

    def forward(self, x, memory, src_mask, trg_mask, r2l_memory=None, r2l_trg_mask=None):
        x = self.sublayer_connection[0](x, lambda x: self.attn(x, x, x, trg_mask))
        x = self.sublayer_connection[1](x, lambda x: self.attn(x, memory, memory, src_mask))

        if r2l_memory is not None:
            x = self.sublayer_connection[-2](x, lambda x: self.attn(x, r2l_memory, r2l_memory, r2l_trg_mask))

        return self.sublayer_connection[-1](x, self.feed_forward)


class Encoder(nn.Module):
    def __init__(self, n, encoder_layer):
        super(Encoder, self).__init__()
        self.encoder_layer = clones(encoder_layer, n)

    def forward(self, x, src_mask):
        for layer in self.encoder_layer:
            x = layer(x, src_mask)
        return x
```
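A similar hedged shape check for the multi-head attention and encoder layer might look like this; it uses the classes defined above, and the head count, model width, feed-forward size, and all-ones mask are assumed values chosen only for illustration:

```python
import torch

# Hypothetical shape check: 8 heads over d_model=512, batch of 2, sequence length 10.
d_model, n_heads = 512, 8
mha = MultiHeadAttention(n_heads, d_model, dropout=0.0)
ff = PositionWiseFeedForward(d_model, d_ff=2048, dropout=0.0)
layer = EncoderLayer(d_model, mha, ff, dropout=0.0)

x = torch.randn(2, 10, d_model)
mask = torch.ones(2, 1, 10, dtype=torch.bool)  # [batch, 1, seq_len]: every position visible
out = layer(x, mask)
print(out.shape)       # torch.Size([2, 10, 512])
print(mha.attn.shape)  # torch.Size([2, 8, 10, 10]): attention weights per head
```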
```python
class R2L_Decoder(nn.Module):
    def __init__(self, n, decoder_layer):
        super(R2L_Decoder, self).__init__()
        self.decoder_layer = clones(decoder_layer, n)

    def forward(self, x, memory, src_mask, r2l_trg_mask):
        for layer in self.decoder_layer:
            x = layer(x, memory, src_mask, r2l_trg_mask)
        return x


class L2R_Decoder(nn.Module):
    def __init__(self, n, decoder_layer):
        super(L2R_Decoder, self).__init__()
        self.decoder_layer = clones(decoder_layer, n)

    def forward(self, x, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask):
        for layer in self.decoder_layer:
            x = layer(x, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask)
        return x


def pad_mask(src, r2l_trg, trg, pad_idx):
    if isinstance(src, tuple):
        if len(src) == 4:
            src_image_mask = (src[0][:, :, 0] != pad_idx).unsqueeze(1)
            src_motion_mask = (src[1][:, :, 0] != pad_idx).unsqueeze(1)
            src_object_mask = (src[2][:, :, 0] != pad_idx).unsqueeze(1)
            src_rel_mask = (src[3][:, :, 0] != pad_idx).unsqueeze(1)

            enc_src_mask = (src_image_mask, src_motion_mask, src_object_mask, src_rel_mask)
            dec_src_mask_1 = src_image_mask & src_motion_mask
            dec_src_mask_2 = src_image_mask & src_motion_mask & src_object_mask & src_rel_mask
            dec_src_mask = (dec_src_mask_1, dec_src_mask_2)
            src_mask = (enc_src_mask, dec_src_mask)
        if len(src) == 3:
            src_image_mask = (src[0][:, :, 0] != pad_idx).unsqueeze(1)
            src_motion_mask = (src[1][:, :, 0] != pad_idx).unsqueeze(1)
            src_object_mask = (src[2][:, :, 0] != pad_idx).unsqueeze(1)

            enc_src_mask = (src_image_mask, src_motion_mask, src_object_mask)
            dec_src_mask = src_image_mask & src_motion_mask
            src_mask = (enc_src_mask, dec_src_mask)
        if len(src) == 2:
            src_image_mask = (src[0][:, :, 0] != pad_idx).unsqueeze(1)
            src_motion_mask = (src[1][:, :, 0] != pad_idx).unsqueeze(1)

            enc_src_mask = (src_image_mask, src_motion_mask)
            dec_src_mask = src_image_mask & src_motion_mask
            src_mask = (enc_src_mask, dec_src_mask)
    else:
        src_mask = (src[:, :, 0] != pad_idx).unsqueeze(1)

    if trg is not None:
        if isinstance(src_mask, tuple):
            trg_mask = (trg != pad_idx).unsqueeze(1) & subsequent_mask(trg.size(1)).type_as(src_image_mask.data)
            r2l_pad_mask = (r2l_trg != pad_idx).unsqueeze(1).type_as(src_image_mask.data)
            r2l_trg_mask = r2l_pad_mask & subsequent_mask(r2l_trg.size(1)).type_as(src_image_mask.data)
            return src_mask, r2l_pad_mask, r2l_trg_mask, trg_mask
        else:
            trg_mask = (trg != pad_idx).unsqueeze(1) & subsequent_mask(trg.size(1)).type_as(src_mask.data)
            r2l_pad_mask = (r2l_trg != pad_idx).unsqueeze(1).type_as(src_mask.data)
            r2l_trg_mask = r2l_pad_mask & subsequent_mask(r2l_trg.size(1)).type_as(src_mask.data)
            # src_mask: [batch, 1, lens], trg_mask: [batch, 1, lens]
            return src_mask, r2l_pad_mask, r2l_trg_mask, trg_mask
    else:
        return src_mask


def subsequent_mask(size):
    """Mask out subsequent positions."""
    attn_shape = (1, size, size)
    mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return (torch.from_numpy(mask) == 0).cuda()


class Generator(nn.Module):
    def __init__(self, d_model, vocab_size):
        super(Generator, self).__init__()
        self.linear = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        return F.log_softmax(self.linear(x), dim=-1)
```
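To see what the causal mask produced by `subsequent_mask` looks like without requiring a GPU, the same triangular construction can be reproduced on CPU; `subsequent_mask_cpu` below is a hypothetical stand-in added only for illustration, since the function above moves its result to CUDA:

```python
import numpy as np
import torch

def subsequent_mask_cpu(size):
    # Same construction as subsequent_mask above, but kept on CPU for illustration.
    attn_shape = (1, size, size)
    mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(mask) == 0

print(subsequent_mask_cpu(4).int())
# tensor([[[1, 0, 0, 0],
#          [1, 1, 0, 0],
#          [1, 1, 1, 0],
#          [1, 1, 1, 1]]], dtype=torch.int32)
```

Row i of the printed mask shows which positions step i may attend to: itself and everything before it, never anything later.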
```python
class ABDTransformer(nn.Module):
    def __init__(self, vocab, d_feat, d_model, d_ff, n_heads, n_layers, dropout, feature_mode,
                 device='cuda', n_heads_big=128):
        super(ABDTransformer, self).__init__()
        self.vocab = vocab
        self.device = device
        self.feature_mode = feature_mode

        c = copy.deepcopy

        # attn_no_heads = MultiHeadAttention(1, d_model, dropout)
        attn = MultiHeadAttention(n_heads, d_model, dropout)
        attn_big = MultiHeadAttention(n_heads_big, d_model, dropout)
        # attn_big2 = MultiHeadAttention(10, d_model, dropout)
        feed_forward = PositionWiseFeedForward(d_model, d_ff)

        if feature_mode == 'one':
            self.src_embed = FeatEmbedding(d_feat, d_model, dropout)
        elif feature_mode == 'two':
            self.image_src_embed = FeatEmbedding(d_feat[0], d_model, dropout)
            self.motion_src_embed = FeatEmbedding(d_feat[1], d_model, dropout)
        elif feature_mode == 'three':
            self.image_src_embed = FeatEmbedding(d_feat[0], d_model, dropout)
            self.motion_src_embed = FeatEmbedding(d_feat[1], d_model, dropout)
            self.object_src_embed = FeatEmbedding(d_feat[2], d_model, dropout)
        elif feature_mode == 'four':
            self.image_src_embed = FeatEmbedding(d_feat[0], d_model, dropout)
            self.motion_src_embed = FeatEmbedding(d_feat[1], d_model, dropout)
            self.object_src_embed = FeatEmbedding(d_feat[2], d_model, dropout)
            self.rel_src_embed = FeatEmbedding(d_feat[3], d_model, dropout)

        self.trg_embed = TextEmbedding(vocab.n_vocabs, d_model)
        self.pos_embed = PositionalEncoding(d_model, dropout)

        # self.encoder_no_heads = Encoder(n_layers, EncoderLayer(d_model, c(attn_no_heads), c(feed_forward), dropout))
        self.encoder = Encoder(n_layers, EncoderLayer(d_model, c(attn), c(feed_forward), dropout))
        self.encoder_big = Encoder(n_layers, EncoderLayer(d_model, c(attn_big), c(feed_forward), dropout))
        # self.encoder_big2 = Encoder(n_layers, EncoderLayer(d_model, c(attn_big2), c(feed_forward), dropout))
        self.encoder_no_attention = Encoder(n_layers,
                                            EncoderLayerNoAttention(d_model, c(attn), c(feed_forward), dropout))

        self.r2l_decoder = R2L_Decoder(n_layers, DecoderLayer(d_model, c(attn), c(feed_forward),
                                                              sublayer_num=3, dropout=dropout))
        self.l2r_decoder = L2R_Decoder(n_layers, DecoderLayer(d_model, c(attn), c(feed_forward),
                                                              sublayer_num=4, dropout=dropout))

        self.generator = Generator(d_model, vocab.n_vocabs)

    def encode(self, src, src_mask, feature_mode_two=False):
        if self.feature_mode == 'two':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder_big(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder_big(x2, src_mask[1])
            return x1 + x2

        if feature_mode_two:
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder_big(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder_big(x2, src_mask[1])
            return x1 + x2

        if self.feature_mode == 'one':
            x = self.src_embed(src)
            x = self.pos_embed(x)
            return self.encoder(x, src_mask)
        elif self.feature_mode == 'two':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder_big(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder_big(x2, src_mask[1])
            return x1 + x2
        elif self.feature_mode == 'three':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder(x2, src_mask[1])
            x3 = self.object_src_embed(src[2])
            x3 = self.pos_embed(x3)
            x3 = self.encoder(x3, src_mask[2])
            return x1 + x2 + x3
        elif self.feature_mode == 'four':
            x1 = self.image_src_embed(src[0])
            x1 = self.pos_embed(x1)
            x1 = self.encoder(x1, src_mask[0])
            x2 = self.motion_src_embed(src[1])
            x2 = self.pos_embed(x2)
            x2 = self.encoder(x2, src_mask[1])
            x3 = self.object_src_embed(src[2])
            # x3 = self.pos_embed(x3)
            x3 = self.encoder(x3, src_mask[2])
            # x3 = self.encoder_no_attention(x3, src_mask[2])
            x4 = self.rel_src_embed(src[3])
            # x4 = self.pos_embed(x4)
            # x4 = self.encoder_no_heads(x4, src_mask[3])
            x4 = self.encoder_no_attention(x4, src_mask[3])
            # x4 = self.encoder(x4, src_mask[3])
            return x1 + x2 + x3 + x4

    def r2l_decode(self, r2l_trg, memory, src_mask, r2l_trg_mask):
        x = self.trg_embed(r2l_trg)
        x = self.pos_embed(x)
        return self.r2l_decoder(x, memory, src_mask, r2l_trg_mask)

    def l2r_decode(self, trg, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask):
        x = self.trg_embed(trg)
        x = self.pos_embed(x)
        return self.l2r_decoder(x, memory, src_mask, trg_mask, r2l_memory, r2l_trg_mask)

    def forward(self, src, r2l_trg, trg, mask):
        src_mask, r2l_pad_mask, r2l_trg_mask, trg_mask = mask
        if self.feature_mode == 'one':
            encoding_outputs = self.encode(src, src_mask)
            r2l_outputs = self.r2l_decode(r2l_trg, encoding_outputs, src_mask, r2l_trg_mask)
            l2r_outputs = self.l2r_decode(trg, encoding_outputs, src_mask, trg_mask, r2l_outputs, r2l_pad_mask)
        elif self.feature_mode in ('two', 'three', 'four'):
            enc_src_mask, dec_src_mask = src_mask
            r2l_encoding_outputs = self.encode(src, enc_src_mask, feature_mode_two=True)
            encoding_outputs = self.encode(src, enc_src_mask)

            r2l_outputs = self.r2l_decode(r2l_trg, r2l_encoding_outputs, dec_src_mask[0], r2l_trg_mask)
            l2r_outputs = self.l2r_decode(trg, encoding_outputs, dec_src_mask[1], trg_mask, r2l_outputs, r2l_pad_mask)

            # r2l_outputs = self.r2l_decode(r2l_trg, encoding_outputs, dec_src_mask, r2l_trg_mask)
            # l2r_outputs = self.l2r_decode(trg, encoding_outputs, dec_src_mask, trg_mask, None, None)
        else:
            raise ValueError("no output: unsupported feature_mode")

        r2l_pred = self.generator(r2l_outputs)
        l2r_pred = self.generator(l2r_outputs)

        return r2l_pred, l2r_pred

    def greedy_decode(self, batch_size, src_mask, memory, max_len):
        eos_idx = self.vocab.word2idx['']
        r2l_hidden = None
        with torch.no_grad():
            output = torch.ones(batch_size, 1).fill_(eos_idx).long().cuda()
            for i in range(max_len + 2 - 1):
                trg_mask = subsequent_mask(output.size(1))
                dec_out = self.r2l_decode(output, memory, src_mask, trg_mask)  # batch, len, d_model
                r2l_hidden = dec_out
                pred = self.generator(dec_out)  # batch, len, n_vocabs
                next_word = pred[:, -1].max(dim=-1)[1].unsqueeze(1)  # pred[:, -1] has shape [batch, n_vocabs]
                output = torch.cat([output, next_word], dim=-1)
        return r2l_hidden, output

    # required for beam search
    def r2l_beam_search_decode(self, batch_size, src, src_mask, model_encodings, beam_size, max_len):
        end_symbol = self.vocab.word2idx['']
        start_symbol = self.vocab.word2idx['']

        r2l_outputs = None

        # 1.1 Setup Src
        "src has shape (batch_size, sent_len)"
        "src_mask has shape (batch_size, 1, sent_len)"
        # src_mask = (src[:, :, 0] != self.vocab.word2idx['']).unsqueeze(-2)  # TODO Untested
        "model_encodings has shape (batch_size, sentence_len, d_model)"
        # model_encodings = self.encode(src, src_mask)

        # 1.2 Setup Tgt Hypothesis Tracking
        "hypothesis is List(4 bt)[(cur beam_sz, dec_sent_len)], init: List(4 bt)[(1 init_beam_sz, dec_sent_len)]"
        "hypotheses[i] is shape (cur beam_sz, dec_sent_len)"
        hypotheses = [copy.deepcopy(torch.full((1, 1), start_symbol, dtype=torch.long, device=self.device))
                      for _ in range(batch_size)]
        "List after init: List 4 bt of List of len max_len_completed, init: List of len 4 bt of []"
        completed_hypotheses = [copy.deepcopy([]) for _ in range(batch_size)]
        "List len batch_sz of shape (cur beam_sz), init: List(4 bt)[(1 init_beam_sz)]"
        "hyp_scores[i] is shape (cur beam_sz)"
        hyp_scores = [copy.deepcopy(torch.full((1,), 0, dtype=torch.float, device=self.device))
                      for _ in range(batch_size)]  # probs are log_probs, must be init at 0.
        # 2. Iterate: Generate one char at a time until maxlen
        for iter in range(max_len + 1):
            if all([len(completed_hypotheses[i]) == beam_size for i in range(batch_size)]):
                break

            # 2.1 Setup the batch. Since we use beam search, each batch has a variable number
            # (called cur_beam_size) between 0 and beam_size of hypotheses live at any moment.
            # We decode all hypotheses for all batches at the same time, so we must copy the
            # src_encodings, src_mask, etc the appropriate number of times for the number of
            # hypotheses for each example. We keep track of the number of live hypotheses for
            # each example. We run all hypotheses for all examples together through the decoder
            # and log-softmax, and then use `torch.split` to get the appropriate number of
            # hypotheses for each example in the end.
            cur_beam_sizes, last_tokens, model_encodings_l, src_mask_l = [], [], [], []
            for i in range(batch_size):
                if hypotheses[i] is None:
                    cur_beam_sizes += [0]
                    continue
                cur_beam_size, decoded_len = hypotheses[i].shape
                cur_beam_sizes += [cur_beam_size]
                last_tokens += [hypotheses[i]]
                model_encodings_l += [model_encodings[i:i + 1]] * cur_beam_size
                src_mask_l += [src_mask[i:i + 1]] * cur_beam_size
            "shape (sum(4 bt * cur_beam_sz_i), 1 dec_sent_len, 128 d_model)"
            model_encodings_cur = torch.cat(model_encodings_l, dim=0)
            src_mask_cur = torch.cat(src_mask_l, dim=0)
            y_tm1 = torch.cat(last_tokens, dim=0)
            "shape (sum(4 bt * cur_beam_sz_i), 1 dec_sent_len, 128 d_model)"
            if self.feature_mode == 'one':
                out = self.r2l_decode(Variable(y_tm1).to(self.device), model_encodings_cur, src_mask_cur,
                                      Variable(subsequent_mask(y_tm1.size(-1)).type_as(src.data)).to(self.device))
            elif self.feature_mode in ('two', 'three', 'four'):
                out = self.r2l_decode(Variable(y_tm1).to(self.device), model_encodings_cur, src_mask_cur,
                                      Variable(subsequent_mask(y_tm1.size(-1)).type_as(src[0].data)).to(self.device))
            r2l_outputs = out

            "shape (sum(4 bt * cur_beam_sz_i), 1 dec_sent_len, 50002 vocab_sz)"
            log_prob = self.generator(out[:, -1, :]).unsqueeze(1)
            "shape (sum(4 bt * cur_beam_sz_i), 1 dec_sent_len, 50002 vocab_sz)"
            _, decoded_len, vocab_sz = log_prob.shape
            # log_prob = log_prob.reshape(batch_size, cur_beam_size, decoded_len, vocab_sz)
            "shape List(4 bt)[(cur_beam_sz_i, dec_sent_len, 50002 vocab_sz)]"
            "log_prob[i] is (cur_beam_sz_i, dec_sent_len, 50002 vocab_sz)"
            log_prob = torch.split(log_prob, cur_beam_sizes, dim=0)

            # 2.2 Now we process each example in the batch. Note that the example may have already
            # finished processing before other examples (no more hypotheses to try), in which case
            # we continue
            new_hypotheses, new_hyp_scores = [], []
            for i in range(batch_size):
                if hypotheses[i] is None or len(completed_hypotheses[i]) >= beam_size:
                    new_hypotheses += [None]
                    new_hyp_scores += [None]
                    continue

                # 2.2.1 We compute the cumulative scores for each live hypothesis for the example.
                # hyp_scores is the old scores for the previous stage, and `log_prob` are the new
                # probs for this stage. Since they are log probs, we sum them instead of multiplying
                # them. The .view(-1) forces all the hypotheses into one dimension. The shape of this
                # dimension is cur_beam_sz * vocab_sz (ex: 5 * 50002). So after getting the topk from
                # it, we can recover the generating sentence and the next word using:
                # ix // vocab_sz, ix % vocab_sz.
                cur_beam_sz_i, dec_sent_len, vocab_sz = log_prob[i].shape
                "shape (vocab_sz,)"
                cumulative_hyp_scores_i = (hyp_scores[i].unsqueeze(-1).unsqueeze(-1)
                                           .expand((cur_beam_sz_i, 1, vocab_sz)) + log_prob[i]).view(-1)

                # 2.2.2 We get the topk values in cumulative_hyp_scores_i and compute the current
                # (generating) sentence and the next word using: ix // vocab_sz, ix % vocab_sz.
                "shape (cur_beam_sz,)"
                live_hyp_num_i = beam_size - len(completed_hypotheses[i])
                "shape (cur_beam_sz,). Vals are between 0 and 50002 vocab_sz"
                top_cand_hyp_scores, top_cand_hyp_pos = torch.topk(cumulative_hyp_scores_i, k=live_hyp_num_i)
                "shape (cur_beam_sz,). prev_hyp_ids vals are 0"
```
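Finally, a hypothetical end-to-end smoke test of `ABDTransformer` in `feature_mode='one'` could look like the sketch below. It assumes a CUDA device is available (several helpers above hard-code `.cuda()`), and `DummyVocab`, the feature size, and all tensor shapes are made-up stand-ins for the project's real vocabulary and video features:

```python
import torch

class DummyVocab:
    """Hypothetical stand-in for the project's vocabulary object."""
    def __init__(self, n_vocabs=1000):
        self.n_vocabs = n_vocabs
        self.word2idx = {}  # greedy/beam decoding would also look special tokens up here

vocab = DummyVocab()
model = ABDTransformer(vocab, d_feat=2048, d_model=512, d_ff=2048,
                       n_heads=8, n_layers=2, dropout=0.1,
                       feature_mode='one', device='cuda').cuda()

batch, frames, pad_idx = 2, 16, 0
src = torch.randn(batch, frames, 2048).cuda()               # video features [batch, frames, d_feat]
trg = torch.randint(4, vocab.n_vocabs, (batch, 12)).cuda()  # left-to-right caption tokens
r2l_trg = torch.flip(trg, dims=[1])                         # right-to-left caption tokens

masks = pad_mask(src, r2l_trg, trg, pad_idx)
r2l_pred, l2r_pred = model(src, r2l_trg, trg, masks)
print(r2l_pred.shape, l2r_pred.shape)  # each [batch, 12, n_vocabs] of log-probabilities
```

The two returned tensors are the right-to-left and left-to-right predictions produced by the shared `Generator` head.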