Hidden units saturate in a seq2seq model in PyTorch

Problem description:

I am trying to write a very simple machine translation toy example in PyTorch. To simplify the problem, I turn the machine translation task into the following one:

Given a random sequence ([4, 8, 9, ...]), predict the sequence of its elements each plus 1 ([5, 9, 10, ...]). The ids 0, 1 and 2 are used as pad, bos and eos respectively.
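
For concreteness, one source/target pair looks like this (following the same construction as load_data further down; the concrete numbers are just an illustration):

src     = [4, 8, 9]         # random source sequence
tgt_in  = [1, 5, 9, 10]     # decoder input:  [bos] + (each element + 1)
tgt_out = [5, 9, 10, 2]     # decoder target: (each element + 1) + [eos]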

I ran into the same problem in this toy task as in my machine translation task. For debugging, I use a very small data size, n_data = 50, and find that the model cannot even overfit these data. Looking into the model, I find that the hidden states of the encoder/decoder quickly saturate, i.e. because of tanh they all end up very close to +1/-1:

-0.8987 0.9634 0.9993 ... -0.8930 -0.4822 -0.9960 
-0.9673 1.0000 -0.8007 ... 0.9929 -0.9992 0.9990 
-0.9457 0.9290 -0.9260 ... -0.9932 0.9851 0.9980 
      ...    ⋱    ... 
-0.9995 0.9997 -0.9350 ... -0.9820 -0.9942 -0.9913 
-0.9951 0.9488 -0.8894 ... -0.9842 -0.9895 -0.9116 
-0.9991 0.9769 -0.5871 ... 0.7557 0.9049 0.9881 
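
(A quick way to quantify this saturation; this check is my addition, not part of the original script. It reuses the enc, x and lenx names from the training loop below and the same pre-0.4 PyTorch style.)

# Rough saturation check: fraction of encoder hidden units pinned near +/-1.
_, h_enc = enc(x, enc.init_hidden(x.size(1)), lenx)
saturated = (h_enc.abs() > 0.99).float().mean()
print('fraction of saturated hidden units:', saturated.data[0])  # use .item() on newer PyTorch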

Also, no matter how I adjust the learning rate or switch the units between RNN/LSTM/GRU, the loss value seems to be bounded from below even with just the 50 samples. With more data, the model does not seem to converge at all.

step: 0, loss: 2.313938 
step: 10, loss: 1.435780 
step: 20, loss: 0.779704 
step: 30, loss: 0.395590 
step: 40, loss: 0.281261 
... 
step: 480, loss: 0.231419 
step: 490, loss: 0.231410 

When I used TensorFlow, I could easily overfit such a dataset with a seq2seq model and reach a very small loss value.

Here is what I have already tried:

  1. Manually initializing the embeddings to very small numbers;
  2. Clipping the gradients to a fixed norm, e.g. 1e-2, 2, 3, 5, 10;
  3. Excluding the padding index when computing the loss (by passing ignore_index to NLLLoss). Items 2 and 3 are sketched in the snippet right after this list.
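
For reference, items 2 and 3 correspond to the following calls (same pre-0.4 PyTorch API as the script below, reusing its loss, net and optimizer names; the pad id is 0 in this setup):

# Item 3: skip the pad index (0 here) when computing the loss.
criterion = torch.nn.NLLLoss(ignore_index=0)

# Item 2: clip gradients to a fixed norm after backward(), before the optimizer step.
loss.backward()
torch.nn.utils.clip_grad_norm(net.parameters(), 3)  # renamed clip_grad_norm_ in newer PyTorch
optimizer.step()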

None of these helped solve the problem.

How can I get around this? Any help would be appreciated.

Here is the code; for a better reading experience, it is also available as a gist:

#!/usr/bin/env python3 
# -*- coding: utf-8 -*- 

import numpy as np 
import torch 
import torch.nn.functional as F 
from torch import nn 
from torch.autograd import Variable 

np.random.seed(0) 
torch.manual_seed(0) 

_RECURRENT_FN_MAPPING = { 
    'rnn': torch.nn.RNN, 
    'gru': torch.nn.GRU, 
    'lstm': torch.nn.LSTM, 
} 


def get_recurrent_cell(n_inputs, 
         num_units, 
         num_layers, 
         type_, 
         dropout=0.0, 
         bidirectional=False): 
    cls = _RECURRENT_FN_MAPPING.get(type_) 

    return cls(
     n_inputs, 
     num_units, 
     num_layers, 
     dropout=dropout, 
     bidirectional=bidirectional) 


class Recurrent(nn.Module): 

    def __init__(self, 
       num_units, 
       num_layers=1, 
       unit_type='gru', 
       bidirectional=False, 
       dropout=0.0, 
       embedding=None, 
       attn_type='general'): 
     super(Recurrent, self).__init__() 

     num_inputs = embedding.weight.size(1) 
     self._num_inputs = num_inputs 
     self._num_units = num_units 
     self._num_layers = num_layers 
     self._unit_type = unit_type 
     self._bidirectional = bidirectional 
     self._dropout = dropout 
     self._embedding = embedding 
     self._attn_type = attn_type 
     self._cell_fn = get_recurrent_cell(num_inputs, num_units, num_layers, 
              unit_type, dropout, bidirectional) 

    def init_hidden(self, batch_size): 
     direction = 1 if not self._bidirectional else 2 
     h = Variable(
      torch.zeros(direction * self._num_layers, batch_size, 
         self._num_units)) 
     if self._unit_type == 'lstm': 
      return (h, h.clone()) 
     else: 
      return h 

    def forward(self, x, h, len_x): 
     # Sort by sequence lengths 
     sorted_indices = np.argsort(-len_x).tolist() 
     unsorted_indices = np.argsort(sorted_indices).tolist() 
     x = x[:, sorted_indices] 
     h = h[:, sorted_indices, :] 
     len_x = len_x[sorted_indices].tolist() 

     embedded = self._embedding(x) 
     packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, len_x) 

     if self._unit_type == 'lstm': 
      o, (h, c) = self._cell_fn(packed, h) 
      o, _ = torch.nn.utils.rnn.pad_packed_sequence(o) 
      return (o[:, unsorted_indices, :], (h[:, unsorted_indices, :], 
               c[:, unsorted_indices, :])) 
     else: 
      o, hh = self._cell_fn(packed, h) 
      o, _ = torch.nn.utils.rnn.pad_packed_sequence(o) 
      return (o[:, unsorted_indices, :], hh[:, unsorted_indices, :]) 


class Encoder(Recurrent): 
    pass 


class Decoder(Recurrent): 
    pass 


class Seq2Seq(nn.Module): 

    def __init__(self, encoder, decoder, num_outputs): 
     super(Seq2Seq, self).__init__() 
     self._encoder = encoder 
     self._decoder = decoder 
     self._out = nn.Linear(decoder._num_units, num_outputs) 

    def forward(self, x, y, h, len_x, len_y): 
     # Encode 
     _, h = self._encoder(x, h, len_x) 
     # Decode 
     o, h = self._decoder(y, h, len_y) 
     # Project 
     o = self._out(o) 

     return F.log_softmax(o) 


def load_data(size, 
       min_len=5, 
       max_len=15, 
       min_word=3, 
       max_word=100, 
       epoch=10, 
       batch_size=64, 
       pad=0, 
       bos=1, 
       eos=2): 
    src = [ 
     np.random.randint(min_word, max_word - 1, 
          np.random.randint(min_len, max_len)).tolist() 
     for _ in range(size) 
    ] 
    tgt_in = [[bos] + [xi + 1 for xi in x] for x in src] 
    tgt_out = [[xi + 1 for xi in x] + [eos] for x in src] 

    def _pad(batch): 
     max_len = max(len(x) for x in batch) 
     return np.asarray(
      [ 
       np.pad(
        x, (0, max_len - len(x)), 
        mode='constant', 
        constant_values=pad) for x in batch 
      ], 
      dtype=np.int64) 

    def _len(batch): 
     return np.asarray([len(x) for x in batch], dtype=np.int64) 

    for e in range(epoch): 
     batch_start = 0 

     while batch_start < size: 
      batch_end = batch_start + batch_size 

      s, ti, to = (src[batch_start:batch_end], 
         tgt_in[batch_start:batch_end], 
         tgt_out[batch_start:batch_end]) 
      lens, lent = _len(s), _len(ti) 

      s, ti, to = _pad(s).T, _pad(ti).T, _pad(to).T 

      yield (Variable(torch.LongTensor(s)), 
        Variable(torch.LongTensor(ti)), 
        Variable(torch.LongTensor(to)), lens, lent) 

      batch_start += batch_size 


def print_sample(x, y, yy): 
    x = x.data.numpy().T 
    y = y.data.numpy().T 
    yy = yy.data.numpy().T 

    for u, v, w in zip(x, y, yy): 
     print('--------') 
     print('S: ', u) 
     print('T: ', v) 
     print('P: ', w) 


n_data = 50 
min_len = 5 
max_len = 10 
vocab_size = 101 
n_samples = 5 

epoch = 100000 
batch_size = 32 
lr = 1e-2 
clip = 3 

emb_size = 50 
hidden_size = 50 
num_layers = 1 
max_length = 15 

src_embed = torch.nn.Embedding(vocab_size, emb_size) 
tgt_embed = torch.nn.Embedding(vocab_size, emb_size) 

eps = 1e-3 
src_embed.weight.data.uniform_(-eps, eps) 
tgt_embed.weight.data.uniform_(-eps, eps) 

enc = Encoder(hidden_size, num_layers, embedding=src_embed) 
dec = Decoder(hidden_size, num_layers, embedding=tgt_embed) 
net = Seq2Seq(enc, dec, vocab_size) 

optimizer = torch.optim.Adam(net.parameters(), lr=lr) 
criterion = torch.nn.NLLLoss() 

loader = load_data(
    n_data, 
    min_len=min_len, 
    max_len=max_len, 
    max_word=vocab_size, 
    epoch=epoch, 
    batch_size=batch_size) 

for i, (x, yin, yout, lenx, leny) in enumerate(loader): 
    net.train() 
    optimizer.zero_grad() 

    logits = net(x, yin, enc.init_hidden(x.size()[1]), lenx, leny) 
    loss = criterion(logits.view(-1, vocab_size), yout.contiguous().view(-1)) 

    loss.backward() 

    torch.nn.utils.clip_grad_norm(net.parameters(), clip) 
    optimizer.step() 

    if i % 10 == 0: 
     print('step: {}, loss: {:.6f}'.format(i, loss.data[0])) 

    if i % 200 == 0 and i > 0: 
     net.eval() 
     x, yin, yout, lenx, leny = (x[:, :n_samples], yin[:, :n_samples], 
            yout[:, :n_samples], lenx[:n_samples], 
            leny[:n_samples]) 
     outputs = net(x, yin, enc.init_hidden(x.size()[1]), lenx, leny) 
     _, preds = torch.max(outputs, 2) 
     print_sample(x, yout, preds) 

I think you are not operating in a range where tanh gives reasonable outputs: your inputs are quite large/small, which drives the activations to 1/-1. For example, tanh(5) = 0.999 and tanh(-5) = -0.999. Try normalizing your data into a range tanh can handle without going to the extremes (say between -1 and +1). If the activation function were a sigmoid, a range between 0 and 1 would be better.

Tanh
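
As a concrete illustration of the normalization suggested above (my sketch, not code from the answer), min-max scaling rescales a real-valued array into [-1, 1]:

import numpy as np

def minmax_scale(x, lo=-1.0, hi=1.0):
    # Rescale x linearly so that min(x) -> lo and max(x) -> hi.
    x = np.asarray(x, dtype=np.float64)
    return lo + (x - x.min()) * (hi - lo) / (x.max() - x.min())

print(minmax_scale([3, 50, 100]))  # approximately [-1.  -0.031  1.]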

I tried initializing the input embeddings to very small numbers (about 1e-4), but nothing changed. What do you mean by data normalization.. – Edityouprofile

I don't mean making the data closer to 0, but normalizing it into the range from -1 to 1. You can do that with min-max normalization. – Shehroz

I tried that, but I did not update the question. Sorry. – Edityouprofile