1. Loss Functions
https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss
L1Loss and MSELoss
The main thing to watch with a loss function is its input shape and output shape.
import torch
from torch.nn import L1Loss
from torch import nn

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)  # must be float for the loss computation
targets = torch.tensor([1, 2, 5], dtype=torch.float32)
print("inputs:{}".format(inputs))    # inputs:tensor([1., 2., 3.])
print("targets:{}".format(targets))  # targets:tensor([1., 2., 5.])

inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))
print("inputs:{}".format(inputs))    # inputs:tensor([[[[1., 2., 3.]]]])
print("targets:{}".format(targets))  # targets:tensor([[[[1., 2., 5.]]]])

loss = L1Loss(reduction='sum')
result = loss(inputs, targets)

loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)

print(result)      # tensor(2.)
print(result_mse)  # tensor(1.3333)
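The two printed values can be re-derived by hand; a minimal sketch using the same tensors as above (the reshape does not matter here, since both losses reduce the elementwise differences the same way):

import torch

inputs = torch.tensor([1., 2., 3.])
targets = torch.tensor([1., 2., 5.])

# L1Loss with reduction='sum': |1-1| + |2-2| + |3-5| = 2
print((inputs - targets).abs().sum())    # tensor(2.)

# MSELoss with the default reduction='mean': (0 + 0 + 4) / 3
print(((inputs - targets) ** 2).mean())  # tensor(1.3333)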
Compared with the losses above, the input to CrossEntropyLoss has an extra dimension C, which represents the number of classes.
Computing the cross-entropy:
x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))  # (N, C)
print("x:{}".format(x))  # x:tensor([[0.1000, 0.2000, 0.3000]])
print("y:{}".format(y))  # y:tensor([1])

loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)  # tensor(1.1019)
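The printed value matches the formula CrossEntropyLoss implements, loss(x, class) = -x[class] + log(sum_j exp(x[j])); a minimal sketch re-computing it with the same x and y as above:

import torch

x = torch.tensor([0.1, 0.2, 0.3])
target_class = 1
# -x[target] plus the log-sum-exp over all class scores
manual = -x[target_class] + torch.log(torch.exp(x).sum())
print(manual)  # tensor(1.1019)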
The complete code is as follows:
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=1)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
# define the cross-entropy loss
loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    print(result_loss)
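Because batch_size=1 and the network ends in Linear(64, 10), each outputs tensor has shape (1, 10) and each targets tensor has shape (1,), exactly the (N, C) input convention of CrossEntropyLoss described above. A minimal check, reusing dataloader and tudui from the script:

for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    print(outputs.shape)   # torch.Size([1, 10])
    print(targets.shape)   # torch.Size([1])
    break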
2. Backpropagation
# define the cross-entropy loss
loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    print(result_loss)
    # backpropagation starts from the loss
    result_loss.backward()
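After result_loss.backward(), every parameter of the network holds its gradient in its .grad attribute; these are exactly the values the optimizer in the next section consumes. A minimal inspection sketch (model1[0] is the first Conv2d of the Tudui model defined above):

# run after at least one result_loss.backward() call
first_conv = tudui.model1[0]
print(first_conv.weight.grad.shape)  # torch.Size([32, 3, 5, 5])
# before the first backward(), first_conv.weight.grad would still be None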
3. Optimizers
https://pytorch.org/docs/stable/optim.html
The optimizer's job is to reduce the loss: using the gradients computed by backpropagation, it updates the parameters so that the loss goes down.
- The step() method: uses each parameter's grad to update that parameter.
for input, target in dataset:
    optimizer.zero_grad()  # clear the gradients left over from the previous step
    output = model(input)
    loss = loss_fn(output, target)
    loss.backward()
    optimizer.step()  # update the parameters
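For plain SGD, step() amounts to p = p - lr * p.grad for every parameter. A minimal sketch with a single scalar parameter (the names w and opt are illustrative):

import torch

# a single scalar parameter, initialised to 2.0
w = torch.tensor([2.0], requires_grad=True)
opt = torch.optim.SGD([w], lr=0.1)

loss = (w ** 2).sum()  # d(loss)/dw = 2w = 4.0
opt.zero_grad()
loss.backward()
opt.step()             # w <- w - lr * grad = 2.0 - 0.1 * 4.0
print(w)               # tensor([1.6000], requires_grad=True)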
The complete code:
# -*- coding: utf-8 -*-
import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=1)
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
loss = nn.CrossEntropyLoss()
# build the network; its parameters are tudui.parameters()
tudui = Tudui()
# create (define) an optimizer
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)
scheduler = StepLR(optim, step_size=5, gamma=0.1)  # learning-rate schedule

for epoch in range(20):
    running_loss = 0.0
    # without this outer loop, the model would see the data only once
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss(outputs, targets)
        # zero the parameter gradients in the network
        optim.zero_grad()
        # backpropagate to compute the gradient of each node
        result_loss.backward()
        # call the optimizer to update the parameters
        optim.step()
        # sum the loss over every image in this epoch
        running_loss = running_loss + result_loss.item()
    # decay the learning rate once per epoch, not once per batch
    scheduler.step()
    print(running_loss)  # the loss keeps decreasing
Summary
How to adjust the learning rate
Set it relatively large early on so training quickly moves toward a good region, then smaller later to refine toward a better result.
scheduler = StepLR(optim, step_size=5, gamma=0.1)  # multiply the lr by gamma=0.1 every step_size=5 epochs
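With step_size=5 and gamma=0.1, StepLR multiplies the learning rate by 0.1 after every 5 calls to scheduler.step(). A minimal sketch showing the schedule (a dummy parameter stands in for the real network):

import torch
from torch.optim.lr_scheduler import StepLR

param = torch.zeros(1, requires_grad=True)
optim = torch.optim.SGD([param], lr=0.01)
scheduler = StepLR(optim, step_size=5, gamma=0.1)

for epoch in range(12):
    print(epoch, optim.param_groups[0]['lr'])  # 0.01 for epochs 0-4, 0.001 for 5-9, then 0.0001
    optim.step()      # normally one full pass over the data happens here
    scheduler.step()  # decay once per epoch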