Logistic Regression (PyTorch)

import torch


# 1. Prepare the dataset: inputs x with binary labels y (0 or 1)
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])

# 2. Design the model as a subclass of torch.nn.Module.
#    A single linear layer followed by a sigmoid is exactly logistic regression.
class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    # Override the parent class's forward method
    def forward(self, x):
        """Compute the predicted probability P(y=1 | x)."""
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred


model = LinearModel()
print(model)
# LinearModel(
#  (linear): Linear(in_features=1, out_features=1, bias=True)
# )
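
A quick aside on why the sigmoid is there: it squashes the real-valued output of the linear layer into (0, 1), so the model's output can be read as a probability P(y=1 | x). A small standalone check, not part of the training script (values rounded by the default tensor printing):

z = torch.linspace(-5.0, 5.0, 5)
print(torch.sigmoid(z))
# tensor([0.0067, 0.0759, 0.5000, 0.9241, 0.9933])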

# 3. Construct the loss function and the optimizer
# Loss function: binary cross-entropy summed over the batch
# (size_average=False is deprecated; reduction='sum' is the equivalent)
criterion = torch.nn.BCELoss(reduction='sum')
# Optimizer: stochastic gradient descent over the model's parameters
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
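
For reference, BCELoss with reduction='sum' computes -Σ[y·log(ŷ) + (1-y)·log(1-ŷ)] over all samples. A minimal by-hand check of that formula (the tensors below are made-up probabilities and labels, not the training data):

y_hat = torch.Tensor([[0.2], [0.3], [0.8]])
y = torch.Tensor([[0.0], [0.0], [1.0]])
manual = -(y * torch.log(y_hat) + (1 - y) * torch.log(1 - y_hat)).sum()
print(manual.item())               # binary cross-entropy by hand
print(criterion(y_hat, y).item())  # same value from BCELoss(reduction='sum')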

# 4. Training loop
for epoch in range(10000):
    # Forward pass (invokes the model's forward method)
    y_pred = model(x_data)
    # Compute the loss; loss is a scalar tensor
    loss = criterion(y_pred, y_data)
    # Zero the gradients accumulated from the previous iteration
    optimizer.zero_grad()
    # Backward pass: compute gradients of the loss w.r.t. the parameters
    loss.backward()
    # Update the parameters
    optimizer.step()
    print(epoch, loss.item())
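
Why optimizer.zero_grad() matters: PyTorch accumulates gradients across backward() calls instead of overwriting them, so skipping it would make each step use the sum of all past gradients. A tiny standalone illustration (the tensor w below is hypothetical and unrelated to the model):

w = torch.tensor([1.0], requires_grad=True)
(2 * w).sum().backward()
print(w.grad)  # tensor([2.])
(2 * w).sum().backward()
print(w.grad)  # tensor([4.]) -- accumulated, not replaced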

# 5. Inspect the trained parameters and test the model
print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())

x_test = torch.Tensor([[2.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data.item())
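
Since the output is a probability, a hard class label comes from thresholding at 0.5, and plotting the probability curve over a range of inputs shows the learned decision boundary. A sketch of both (numpy and matplotlib are assumed to be installed; they are not used elsewhere in this post):

import numpy as np
import matplotlib.pyplot as plt

# Hard label: predict class 1 when P(y=1 | x) > 0.5
print('label =', int(y_test.item() > 0.5))

# Probability curve over x in [0, 10]
x = np.linspace(0, 10, 200)
x_t = torch.Tensor(x).view((200, 1))
y_t = model(x_t)
plt.plot(x, y_t.data.numpy())
plt.axhline(y=0.5, c='r', linestyle='--')
plt.xlabel('x')
plt.ylabel('P(y=1)')
plt.grid()
plt.show()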
