Two Backward Passes
Background
Suppose a network is designed in two stages: the output of the first stage serves as the input to the second stage, and supervision is applied at the end of each stage. Rather than simply summing the two losses and training the whole network jointly, we want to use each stage's loss to train its own sub-network separately. Equivalently, this can be viewed as cascading two networks.
Code implementation
import torch
import torch.nn as nn
from torch.optim import Adam

epoch = 100
learning_rate = 1e-3

# Sub-network 1
myNet_1 = nn.Conv2d(32, 32, 3, 1, 1)
optimizer_1 = Adam(myNet_1.parameters(), lr=learning_rate)
loss_fun_1 = nn.MSELoss()

# Sub-network 2
myNet_2 = nn.Conv2d(32, 32, 3, 1, 1)
optimizer_2 = Adam(myNet_2.parameters(), lr=learning_rate)
loss_fun_2 = nn.L1Loss()

x = torch.randn([10, 32, 64, 64])
y = torch.randn([10, 32, 64, 64])

# Training loop
for i in range(epoch):
    # Stage 1: supervise and update sub-network 1 with its own loss
    output_1 = myNet_1(x)
    loss_1 = loss_fun_1(output_1, y)
    optimizer_1.zero_grad()
    loss_1.backward()  # first backward pass
    optimizer_1.step()

    # Stage 2: detach() cuts output_1 out of the autograd graph, so
    # loss_2 only updates sub-network 2 and never reaches sub-network 1
    output_2 = myNet_2(output_1.detach())
    loss_2 = loss_fun_2(output_2, y)
    optimizer_2.zero_grad()
    loss_2.backward()  # second backward pass
    optimizer_2.step()
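For contrast, here is a minimal sketch (reusing the names defined above) of the joint alternative mentioned in the Background: the two losses are simply summed and a single optimizer updates both sub-networks through one backward pass.

# Joint alternative: sum both losses so one backward pass updates
# both sub-networks through the shared computation graph.
params = list(myNet_1.parameters()) + list(myNet_2.parameters())
optimizer = Adam(params, lr=learning_rate)

for i in range(epoch):
    output_1 = myNet_1(x)
    output_2 = myNet_2(output_1)  # no detach(): gradients flow back into myNet_1
    loss = loss_fun_1(output_1, y) + loss_fun_2(output_2, y)
    optimizer.zero_grad()
    loss.backward()  # one backward pass through both stages
    optimizer.step()

The only structural differences are the absence of detach() and the single optimizer over both parameter sets.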
Learnable Parameters
Background
A network may have multiple inputs (or branches) whose contributions to the final result differ. Introducing learnable weights, so that the network itself learns an appropriate weight for each input, should improve its performance.
Code implementation
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam

# Network definition
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Learnable scalar weights (requires_grad defaults to True for nn.Parameter)
        self.w1 = nn.Parameter(torch.FloatTensor(1))
        self.w2 = nn.Parameter(torch.FloatTensor(1))
        # Initialize both weights to 0.5
        self.w1.data.fill_(0.5)
        self.w2.data.fill_(0.5)
        # Two convolutional branches
        self.conv1 = nn.Conv2d(32, 32, 3, 1, 1)
        self.conv2 = nn.Conv2d(32, 32, 3, 1, 1)

    def forward(self, x):
        # Weighted sum of the two branches; w1 and w2 are trained by backprop
        x = self.w1 * self.conv1(x) + self.w2 * self.conv2(x)
        return F.relu(x)

epoch = 100
learning_rate = 1e-3

myNet = Net()
optimizer = Adam(myNet.parameters(), lr=learning_rate)  # includes w1 and w2
loss_fun = nn.MSELoss()

x = torch.randn([10, 32, 64, 64])
y = torch.randn([10, 32, 64, 64])

# Training loop
for i in range(epoch):
    output = myNet(x)
    loss = loss_fun(output, y)
    optimizer.zero_grad()
    loss.backward()  # backward pass updates conv weights and w1/w2 alike
    optimizer.step()

print(f'Weight 1: {myNet.w1.item()}')
print(f'Weight 2: {myNet.w2.item()}')
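If the two weights should stay on a comparable scale or sum to 1, a common variant is to store unconstrained parameters and normalize them with a softmax inside forward. This is a sketch, not part of the original example; NormalizedNet and raw_w are hypothetical names, and the imports from the block above are assumed.

# Hypothetical variant: keep the raw parameters unconstrained and derive
# normalized branch weights that always sum to 1 via softmax.
class NormalizedNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.raw_w = nn.Parameter(torch.zeros(2))  # softmax of zeros -> equal weights
        self.conv1 = nn.Conv2d(32, 32, 3, 1, 1)
        self.conv2 = nn.Conv2d(32, 32, 3, 1, 1)

    def forward(self, x):
        w = F.softmax(self.raw_w, dim=0)  # w[0] + w[1] == 1
        x = w[0] * self.conv1(x) + w[1] * self.conv2(x)
        return F.relu(x)

With torch.zeros(2) as the initial value, the softmax assigns 0.5 to each branch, matching the 0.5 initialization used above.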