torch_feed_mynet

code_torch_feed_mynet

  • Here we swap in our own neural network (a quick shape check follows the class definition below)

  • import torch.nn as nn
    
    
    class MyNetwork(nn.Module):
    
        def __init__(self, input_size=28, hidden_size=100, num_classes=10):
            super(MyNetwork, self).__init__()
            # Two fully connected layers with a ReLU activation in between
            self.fc1 = nn.Linear(in_features=input_size, out_features=hidden_size)
            self.relu = nn.ReLU()
            self.fc2 = nn.Linear(in_features=hidden_size, out_features=num_classes)
    
        def forward(self, x):
            # Define the forward computation: fc1 -> ReLU -> fc2
            out1 = self.fc1(x)
            out_relu1 = self.relu(out1)
            out2 = self.fc2(out_relu1)
            return out2
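
  • As a quick sanity check (a sketch of my own, assuming the class is saved as MyNetwork.py, which is what the training script below expects), feed a dummy batch of flattened 28x28 inputs through MyNetwork and confirm it returns one score per class:

  • import torch
    from MyNetwork import MyNetwork
    
    # Build the network with the same sizes used later for MNIST
    net = MyNetwork(input_size=28 * 28, hidden_size=100, num_classes=10)
    # A dummy batch of 4 flattened 28x28 images
    x = torch.randn(4, 28 * 28)
    out = net(x)
    print(out.shape)  # expected: torch.Size([4, 10])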
    
    
  • Then, starting from the logistic regression code, swap the model out for MyNetwork

  • import torch
    import torch.nn as nn
    import torchvision
    import torchvision.transforms as transforms
    from MyNetwork import MyNetwork
    
    ##############################################################################
    
    # This script takes torch_logistic_regression and changes only the model part
    
    ##############################################################################
    # Hyper-parameters
    input_size = 28 * 28
    num_classes = 10
    num_epochs = 20
    batch_size = 100
    learning_rate = 0.001
    
    
    # Mnist dataset
    train_dataset = torchvision.datasets.MNIST(root='../../data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)
    test_dataset = torchvision.datasets.MNIST(root='../../data',
                                              train=False,
                                              transform=transforms.ToTensor())
    
    
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False)
    
    
    # model = nn.Linear(input_size, num_classes)
    model = MyNetwork(input_size=input_size, hidden_size=100, num_classes=num_classes)
    
    # Loss and optimizer
    # nn.CrossEntropyLoss() computes softmax internally
    criterion = nn.CrossEntropyLoss()
    # Adam is used as the optimizer here; it works well and does a bit better than SGD on this task
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
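    # For comparison only (an alternative, not part of the original script): plain SGD
    # with momentum could be swapped in; per the note above, Adam does a bit better here.
    # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)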
    
    # Train the model
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            # Reshape images to (batch_size, input_size)
            images = images.reshape(-1, input_size)

            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
    
            # Backward pass and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 100 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
    
    
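    # Test the model (gradients are not needed during evaluation)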
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.reshape(-1, input_size)
            outputs = model(images)
            # Take the class with the highest score as the prediction
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        # Report accuracy on the test set
        print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
    
    
    
    # Save the model checkpoint
    torch.save(model.state_dict(), 'model.ckpt')
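
  • Since only the state dict is saved, reloading the checkpoint later requires rebuilding the architecture first. A minimal sketch, assuming the same MyNetwork.py and the model.ckpt written above:

  • import torch
    from MyNetwork import MyNetwork
    
    # Rebuild the model with the same sizes used for training
    model = MyNetwork(input_size=28 * 28, hidden_size=100, num_classes=10)
    # Load the saved parameters and switch to evaluation mode for inference
    model.load_state_dict(torch.load('model.ckpt'))
    model.eval()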
    
    