The implementation code is as follows:
import torch
import torch.nn.functional as F


class LeNet(torch.nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        # 1 input image channel (black & white), 6 output channels, 5x5 square convolution kernel
        # 1 is the number of input channels, 6 is the output channel dimension, 5 is the kernel size
        self.conv1 = torch.nn.Conv2d(1, 6, 5)
        self.conv2 = torch.nn.Conv2d(6, 16, 3)
        # an affine operation: y = Wx + b
        # The batch dimension is not considered here; we only need the size of the feature maps.
        # The output-size formula gives a height and width of 6*6, and there are 16 channels,
        # so the first fully connected layer takes 16 * 6 * 6 inputs.
        # If you do not want to compute the height and width by hand, you can flatten with a view operation instead.
        self.fc1 = torch.nn.Linear(16 * 6 * 6, 120)  # 6*6 from image dimension
        self.fc2 = torch.nn.Linear(120, 84)
        self.fc3 = torch.nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    # This helper returns the number of features per sample
    # (all dimensions except the batch dimension, which is dropped).
    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
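The 6*6 above comes from the standard convolution/pooling output-size formula. As a minimal sketch (assuming a 32x32 single-channel input, as in the classic LeNet setup; conv_out is a hypothetical helper, not part of the original code), the spatial size can be traced layer by layer:

def conv_out(size, kernel, stride=1, padding=0):
    # output size of Conv2d / MaxPool2d: floor((size + 2*padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

h = conv_out(32, 5)      # conv1 (5x5):      32 -> 28
h = conv_out(h, 2, 2)    # max pool (2, 2):  28 -> 14
h = conv_out(h, 3)       # conv2 (3x3):      14 -> 12
h = conv_out(h, 2, 2)    # max pool (2, 2):  12 -> 6, hence 16 * 6 * 6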
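As a quick sanity check (again assuming a 32x32 input; this snippet is an illustration, not part of the original), a random batch can be pushed through the network to confirm the output shape:

net = LeNet()
dummy = torch.randn(1, 1, 32, 32)  # one 32x32 single-channel image (assumed input size)
out = net(dummy)
print(out.shape)  # torch.Size([1, 10])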