Li Mu's Notes (Softmax Regression)

Regression: estimate a continuous value (e.g., the house-price problem).

Classification: predict a discrete category (e.g., whether an image shows a cat or a dog).

Classification problems on Kaggle: classify human protein microscope images into 28 classes, malware into 9 classes, and malicious Wikipedia comments into 7 categories.
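
Softmax regression handles classification by computing one score (logit) per class and normalizing the scores into a probability distribution. A brief summary of the model that the code below implements:

o = Wx + b
ŷ = softmax(o),  where  softmax(o)_i = exp(o_i) / Σ_j exp(o_j)

The predicted class is the one with the largest probability.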

Loss function:
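
With a one-hot true label y and a predicted distribution ŷ, the loss used here is cross-entropy, which reduces to the negative log-probability of the true class (this is exactly what the cross_entropy function below computes):

l(y, ŷ) = -Σ_i y_i · log ŷ_i = -log ŷ_{true class}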

Implementing softmax regression from scratch

import matplotlib.pyplot as plt
import torch
from IPython import display
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# Initialize model parameters
num_inputs = 784  # each 28*28 image is flattened into a vector of length 784
num_outputs = 10  # ten classes

W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b = torch.zeros(num_outputs, requires_grad=True)

# # Summing a matrix along different axes
# X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
# print(X.sum(0, keepdim=True), X.sum(1, keepdim=True))

# Implement softmax:
# 1) exponentiate each element (using exp);
# 2) sum over each row (each sample in the minibatch is a row) to get each sample's normalization constant;
# 3) divide each row by its normalization constant so that each row sums to 1.
def softmax(X):
    X_exp = torch.exp(X)  # exponentiate each element
    partition = X_exp.sum(1, keepdim=True)  # sum over each row
    return X_exp / partition  # broadcasting

X = torch.normal(0, 1, (2, 5))
X_prob = softmax(X)
print(X_prob, X_prob.sum(1))
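
# Note: torch.exp can overflow for large logits. A common numerically stable
# variant (a sketch, not the book's implementation) subtracts each row's
# maximum before exponentiating, which leaves the result mathematically unchanged:
def stable_softmax(X):
    X_shifted = X - X.max(dim=1, keepdim=True).values  # per-row max, kept as a column
    X_exp = torch.exp(X_shifted)
    return X_exp / X_exp.sum(1, keepdim=True)  # broadcasting; each row sums to 1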

# Implement the softmax regression model
def net(X):
    # flatten each image to a length-784 vector, apply the affine transform, then softmax
    return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)

# Create a y_hat (predicted probabilities of two samples over three classes) and use y to index into y_hat
# Create a length-2 vector holding the two true labels
y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])   # assume three classes
print(y_hat[[0, 1], y])   # y[0]=0 -> 0.1, y[1]=2 -> 0.5

# Implement the cross-entropy loss function
def cross_entropy(y_hat, y):
    return -torch.log(y_hat[range(len(y_hat)), y])

print(cross_entropy(y_hat, y))

# Compare the predictions with the true labels
def accuracy(y_hat, y):
    """Count the number of correct predictions."""
    # if y_hat is a matrix of per-class scores, reduce it to predicted labels
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)   # index of the largest value in each row
    cmp = y_hat.type(y.dtype) == y   # cast y_hat to y's dtype, then compare
    return float(cmp.type(y.dtype).sum())   # sum up the correct predictions

print(accuracy(y_hat, y) / len(y))    # fraction of correct predictions

# Evaluate the accuracy of any model net
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of the model on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # set the model to evaluation mode
    metric = Accumulator(2)  # number of correct predictions, total number of predictions
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), y.numel())
    print(f'test_acc:{metric[0] / metric[1]}')
    return metric[0] / metric[1]     # correct / total

# Helper class that keeps running sums (here: the number correct and the total)
class Accumulator:
    """Accumulate sums over `n` variables."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
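
# Quick sketch of how Accumulator behaves (hypothetical numbers):
# acc = Accumulator(2)
# acc.add(3, 10); acc.add(5, 10)   # two batches: 3/10 and 5/10 correct
# acc[0] / acc[1]                  # -> 8 / 20 = 0.4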

# print(evaluate_accuracy(net, test_iter))

# Softmax regression training
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train the model for one epoch."""
    # set the model to training mode
    if isinstance(net, torch.nn.Module):
        net.train()
    # sum of training loss, sum of training accuracy, number of examples
    metric = Accumulator(3)    # accumulator with 3 slots
    for X, y in train_iter:
        # compute gradients and update the parameters
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # use PyTorch's built-in optimizer and loss function
            updater.zero_grad()   # zero out the gradients
            l.backward()    # compute gradients
            updater.step()   # update the parameters
            metric.add(float(l) * len(y), accuracy(y_hat, y), y.size().numel())
        else:
            # use the custom optimizer and loss function
            l.sum().backward()    # sum the per-example losses, then compute gradients
            updater(X.shape[0])
            metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # return the training loss and training accuracy
    print(f'train_loss:{metric[0] / metric[2]},train_acc:{metric[1] / metric[2]}')
    return metric[0] / metric[2], metric[1] / metric[2]

# Utility class for plotting data in an animation
class Animator:
    """Plot data in animation."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        # incrementally plot multiple lines
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # use a lambda to capture the axis-configuration arguments
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # add multiple data points to the chart
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        plt.draw()
        plt.pause(0.001)
        display.clear_output(wait=True)

# Training function
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train the model."""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9], legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)    # training loss and accuracy
        test_acc = evaluate_accuracy(net, test_iter)    # evaluate accuracy on the test set
        animator.add(epoch + 1, train_metrics + (test_acc,))   # plot loss and accuracy
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
    print(f'train_acc:{train_acc},train_loss:{train_loss},test_acc:{test_acc}')

# Use minibatch stochastic gradient descent to optimize the model's loss function
lr = 0.1

def updater(batch_size):
    return d2l.sgd([W, b], lr, batch_size)
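
# For reference, a minimal sketch of what d2l.sgd does (minibatch SGD as presented
# in the d2l book; the packaged function may differ slightly in details):
def sgd_sketch(params, lr, batch_size):
    """Update parameters in place: param <- param - lr * grad / batch_size."""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size   # average the summed gradient over the batch
            param.grad.zero_()                      # reset gradients for the next step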

# Train the model for 10 epochs
num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)

# Make classification predictions on images
def predict_ch3(net, test_iter, n=6):
    """Predict labels."""
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
    d2l.plt.show()

predict_ch3(net, test_iter)

Concise implementation of softmax regression

import torch
from torch import nn
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# The output layer of softmax regression is a fully connected layer.
# PyTorch does not implicitly reshape the inputs, so
# a Flatten layer is placed before the linear layer to adjust the shape of the network's input.
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))

# m is the current module (layer) being visited by net.apply
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)   # mean 0, standard deviation 0.01

net.apply(init_weights)

# The cross-entropy loss takes the unnormalized predictions (logits) and computes the softmax and its log internally
loss = nn.CrossEntropyLoss()
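
# Quick check (a sketch): CrossEntropyLoss applied to raw logits matches
# log_softmax followed by NLLLoss.
logits = torch.randn(4, 10)   # hypothetical unnormalized scores for 4 samples, 10 classes
labels = torch.randint(0, 10, (4,))
print(torch.allclose(loss(logits, labels),
                     nn.NLLLoss()(torch.log_softmax(logits, dim=1), labels)))   # True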

# Use minibatch stochastic gradient descent with a learning rate of 0.1 as the optimization algorithm
trainer = torch.optim.SGD(net.parameters(), lr=0.1)

# Call the training function defined earlier to train the model
num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
