import torch
import torch.nn as nn

class SSD(nn.Module):
    def __init__(self):
        super(SSD, self).__init__()
        # A single conv layer standing in for the SSD backbone.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=5, kernel_size=3, stride=2, padding=1, bias=False)

    def forward(self, x):
        x = self.conv1(x)
        return x
model = SSD()
model.train()
input = torch.ones([1,3,256,256])
output = model(input)
print(model.training)
print(output.shape)
print("*"*20)
with torch.no_grad():
    model.eval()
    input = torch.ones([1, 3, 256, 256])
    output = model(input)
    print(model.training)
    print(output.shape)
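Running this, the first block prints True and torch.Size([1, 5, 128, 128]); after model.eval() the second block prints False with the same shape. Switching to eval mode only flips the training flag (which changes the behavior of layers such as Dropout and BatchNorm), while torch.no_grad() separately disables gradient tracking; neither changes the convolution's output shape.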
Once model.eval() has been called, model.training = False.
Inside the model, self.training = False, so the forward pass proceeds straight to the next block: result = self.postprocess(locs, confs).
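A minimal sketch of that branching on self.training, assuming a placeholder postprocess() method and dummy locs/confs tensors (only the names come from the line above; their real implementations are not shown in this snippet):

import torch
import torch.nn as nn

class SSDWithPostprocess(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 5, kernel_size=3, stride=2, padding=1, bias=False)

    def postprocess(self, locs, confs):
        # Placeholder: a real SSD head would decode boxes and run NMS here.
        return locs, confs

    def forward(self, x):
        x = self.conv1(x)
        locs, confs = x, x  # stand-ins for localization / confidence outputs
        if self.training:
            # model.train(): return raw outputs so a loss can be computed.
            return locs, confs
        # model.eval(): self.training is False, so we fall through to the
        # postprocess step described above.
        result = self.postprocess(locs, confs)
        return result

The point of the branch is that training mode hands the raw outputs to the loss, while eval mode runs the extra postprocess step, so calling model.train() or model.eval() changes the control flow of forward(), not just layer behavior.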