constant scaling code:
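Here "constant scaling" is one of the shortcut variants examined in He et al.'s "Identity Mappings in Deep Residual Networks": rather than the plain identity skip out = ReLU(x + F(x)), the block scales the summed paths by 0.5 before the final activation, i.e. out = ReLU(0.5 * (x + F(x))).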
import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, transform=transform, download=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
class ResidualBlock(nn.Module):
    # a residual block must keep the output channel count the same as the input x
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # 3x3 kernels with padding=1 so the spatial size is unchanged
        # first convolution
        self.conv1 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)
        # second convolution
        self.conv2 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)

    def forward(self, x):
        # activate after the first convolution
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        # constant scaling: sum first, scale by 0.5, then activate
        z = (x + y) * 0.5
        return F.relu(z)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        # 28x28 -> conv1 -> 24x24 -> pool -> 12x12 -> conv2 -> 8x8 -> pool -> 4x4,
        # so the flattened feature size is 32 * 4 * 4 = 512
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.rblock1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x
net = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        # forward
        y_pred = net(inputs)
        # backward
        loss = criterion(y_pred, targets)
        loss.backward()
        # update
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d, %d] loss: %.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
# test-set accuracy recorded after each epoch, used for the plot below
accuracy = []
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs, dim=1)
            total += labels.size(0)
            correct += (labels == predicted).sum().item()
    print("accuracy on test set: %d %% [%d/%d]" % (100 * correct / total, correct, total))
    accuracy.append(100 * correct / total)
if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)
        test()
    plt.plot(range(10), accuracy)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.grid()
    plt.show()
    print("done")
Results:
conv shortcut code:
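The "conv shortcut" variant replaces the identity skip with a 1x1 convolution, so the block computes out = ReLU(conv1x1(x) + F(x)). The 1x1 conv preserves channel count and spatial size but makes the skip path itself learnable (at the cost of extra parameters, and of no longer being a true identity mapping).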
import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, transform=transform, download=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
class ResidualBlock(nn.Module):
    # a residual block must keep the output channel count the same as the input x
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # 3x3 kernels with padding=1 so the spatial size is unchanged
        # first convolution
        self.conv1 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)
        # second convolution
        self.conv2 = nn.Conv2d(channels, channels,
                               kernel_size=3, padding=1)
        # 1x1 convolution used as the shortcut instead of the identity
        self.conv3 = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x):
        # activate after the first convolution
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        # sum first, then activate; the skip path goes through the 1x1 conv
        z = self.conv3(x)
        return F.relu(z + y)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        # 28x28 -> conv1 -> 24x24 -> pool -> 12x12 -> conv2 -> 8x8 -> pool -> 4x4,
        # so the flattened feature size is 32 * 4 * 4 = 512
        self.fc = torch.nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.rblock1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x
net = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, targets = data
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        # forward
        y_pred = net(inputs)
        # backward
        loss = criterion(y_pred, targets)
        loss.backward()
        # update
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d, %d] loss: %.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
# test-set accuracy recorded after each epoch, used for the plot below
accuracy = []
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs, dim=1)
            total += labels.size(0)
            correct += (labels == predicted).sum().item()
    print("accuracy on test set: %d %% [%d/%d]" % (100 * correct / total, correct, total))
    accuracy.append(100 * correct / total)
if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)
        test()
    plt.plot(range(10), accuracy)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.grid()
    plt.show()
    print("done")
Results: