PyTorch Basics (5)

For NumPy implementations of dropout and L1/L2 regularization, see my other post:

https://blog.****.net/fanzonghao/article/details/81079757
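
For reference, the core of (inverted) dropout in NumPy comes down to a few lines. A minimal sketch (not the code from that post):

import numpy as np

def dropout(a, p=0.5, training=True):
    # inverted dropout: zero each unit with probability p and scale the
    # survivors by 1/(1-p), so the expected activation is unchanged
    if not training:
        return a
    mask = (np.random.rand(*a.shape) >= p) / (1.0 - p)
    return a * mask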

Using dropout and L2 regularization in PyTorch:

import torch
import matplotlib.pyplot as plt
torch.manual_seed(1)    # fix the random seed so results are reproducible

N_SAMPLES = 20
N_HIDDEN = 300

# training data
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
print('x.size()',x.size())

# torch.normal(mean, std, out=None) → Tensor
y = x + 0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
print(y.shape)
print(y)
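# (Note: when mean and std are tensors, torch.normal samples each element
# from N(mean_i, std_i), so y is x plus Gaussian noise with std 0.3.)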
# test data
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))

# show data
plt.scatter(x.numpy(), y.numpy(), c='red', s=50, alpha=0.5, label='train')
plt.scatter(test_x.numpy(), test_y.numpy(), c='blue', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()

plt.ion()  # interactive mode, so the plots inside the training loop update without blocking


net_overfitting = torch.nn.Sequential(
    torch.nn.Linear(1,N_HIDDEN),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN,N_HIDDEN),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN,1),
)

net_dropped = torch.nn.Sequential(
    torch.nn.Linear(1,N_HIDDEN),
    torch.nn.Dropout(0.5),  # drop each unit with probability 0.5
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN,N_HIDDEN),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN,1),
)
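# (Note on placement: here Dropout is applied to each Linear layer's output
# before the ReLU; applying it after the activation is equally common and,
# for ReLU, behaves the same, since ReLU(m * a) = m * ReLU(a) for m >= 0.
# nn.Dropout uses inverted dropout: surviving activations are scaled by
# 1/(1-p) at training time, so no rescaling is needed at test time.)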

# plain network, no dropout
optimizer_ofit = torch.optim.Adam(net_overfitting.parameters(), lr=0.01)
# dropout network (same learning rate, so the comparison is fair)
optimizer_drop = torch.optim.Adam(net_dropped.parameters(), lr=0.01)
# to apply an L2 penalty instead, pass weight_decay to the optimizer:
# optimizer_ofit = torch.optim.Adam(net_overfitting.parameters(), lr=0.01, weight_decay=0.001)
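# (Background: weight_decay=wd makes the optimizer add wd * w to each
# parameter's gradient, i.e. the gradient of an extra (wd/2) * ||w||^2 term
# in the loss. With Adam this coupled form interacts with the adaptive step
# sizes; torch.optim.AdamW implements the decoupled variant.)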
loss_func = torch.nn.MSELoss()

for epoch in range(500):
    # train the plain network
    pred_ofit = net_overfitting(x)
    loss_ofit = loss_func(pred_ofit, y)
    optimizer_ofit.zero_grad()
    loss_ofit.backward()
    optimizer_ofit.step()

    # train the dropout network
    pred_drop = net_dropped(x)
    loss_drop = loss_func(pred_drop, y)
    optimizer_drop.zero_grad()
    loss_drop.backward()
    optimizer_drop.step()

    if epoch % 250 == 0:
        net_overfitting.eval()  # switch to evaluation mode
        net_dropped.eval()      # dropout is disabled in eval mode

        test_pred_ofit = net_overfitting(test_x)
        test_pred_drop = net_dropped(test_x)
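        # (Note: wrapping these test-time forward passes in "with torch.no_grad():"
        # would skip building the autograd graph; here detach() is used below
        # before converting to numpy for plotting.)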

        # plot the current fits
        plt.cla()
        plt.scatter(x.numpy(), y.numpy(), c='red', s=50, alpha=0.5, label='train')
        plt.scatter(test_x.numpy(), test_y.numpy(), c='blue', s=50, alpha=0.5, label='test')
        plt.plot(test_x.numpy(), test_pred_ofit.detach().numpy(), 'r-', lw=3, label='overfitting')
        plt.plot(test_x.numpy(), test_pred_drop.detach().numpy(), 'b--', lw=3, label='dropout(50%)')
        plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).detach().numpy(),
                 fontdict={'size': 20, 'color': 'red'})
        plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).detach().numpy(),
                 fontdict={'size': 20, 'color': 'blue'})
        plt.legend(loc='upper left')
        plt.ylim((-2.5, 2.5))
        plt.pause(0.1)

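        # back to training mode so dropout is active again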
        net_overfitting.train()
        net_dropped.train()

plt.ioff()
plt.show()
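
For reference, passing weight_decay to the optimizer is, for plain SGD, the same as adding the L2 penalty to the loss by hand. A minimal sketch of the manual version, reusing net_overfitting, loss_func, x and y from the script above (l2_lambda is my own name for the penalty strength):

l2_lambda = 0.0005  # matches weight_decay=0.001, since d/dw of lambda*w^2 is 2*lambda*w
optimizer = torch.optim.SGD(net_overfitting.parameters(), lr=0.01)

for epoch in range(500):
    pred = net_overfitting(x)
    # sum of squared entries over all weight and bias tensors
    l2_penalty = sum((p ** 2).sum() for p in net_overfitting.parameters())
    total_loss = loss_func(pred, y) + l2_lambda * l2_penalty
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()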

Data:

[figure: the generated training data]

Dropout comparison: you can see that dropout helps prevent overfitting.

[figure: test fit with vs. without dropout]

L2 comparison: you can see that an L2 penalty (weight_decay) also helps prevent overfitting.

[figure: test fit with vs. without the L2 penalty]