torchvision.datasets
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms

train_datasets = datasets.MNIST(root='../dataset/mnist',
                                train=True,
                                transform=transforms.ToTensor(),
                                download=True)   # download the training set
test_datasets = datasets.MNIST(root='../dataset/mnist',
                               train=False,
                               transform=transforms.ToTensor(),
                               download=True)    # download the test set
batch_size is the number of samples in each mini-batch.
One full pass over the entire training set is one epoch.
Each iteration of the inner loop processes one mini-batch.
DataLoader: batch_size=2, shuffle=True
(shuffle=True means the sample order is reshuffled at every epoch)
Note: shuffle itself works fine on Windows; what needs care on Windows is num_workers > 0, which requires the data-loading loop to sit under if __name__ == '__main__':.
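A minimal sketch of how these terms map onto code, assuming the MNIST datasets defined above (batch_size=64 and the loop below are illustrative, not part of the original notes):

from torch.utils.data import DataLoader

train_loader = DataLoader(train_datasets, batch_size=64, shuffle=True)
test_loader = DataLoader(test_datasets, batch_size=64, shuffle=False)

# One full pass over train_loader is one epoch; each step yields one mini-batch.
for batch_idx, (images, labels) in enumerate(train_loader):
    print(batch_idx, images.shape, labels.shape)  # e.g. torch.Size([64, 1, 28, 28]) torch.Size([64])
    break  # only inspect the first mini-batch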
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader

class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        # Load the whole CSV into memory; the last column is the label.
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len

dataset = DiabetesDataset(path)  # path points to the diabetes CSV file
train_loader = DataLoader(dataset=dataset,
                          batch_size=20,
                          shuffle=True,
                          num_workers=4)
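A quick sanity check of the loader (a sketch only; it assumes the CSV has 8 feature columns plus 1 label column, and the if __name__ == '__main__': guard is needed because num_workers=4 spawns worker processes):

if __name__ == '__main__':
    inputs, labels = next(iter(train_loader))
    print(inputs.shape, labels.shape)  # expected: torch.Size([20, 8]) torch.Size([20, 1])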
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # Three fully connected layers: 8 input features -> 6 -> 4 -> 1 output
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))  # final sigmoid gives a value in (0, 1)
        return x
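A quick dimension check of the network with random data (illustrative only, not part of the training code):

net = Model()
x = torch.randn(4, 8)   # a pretend mini-batch: 4 samples, 8 features each
print(net(x).shape)     # torch.Size([4, 1]); values lie in (0, 1) because of the final sigmoid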
model = Model()
criterion = torch.nn.MSELoss(reduction='sum')  # a built-in PyTorch loss; for a 0/1 target, torch.nn.BCELoss is the more usual choice
# Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.02)  # lr is the learning rate
# Other optimizers are available as well; pick whichever suits your problem (see the sketch below).
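For example, swapping in Adam is a one-line change (lr=0.01 is just an illustrative value):

# Alternative optimizer: Adam usually needs less learning-rate tuning than plain SGD.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)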
if __name__ == '__main__':
    for epoch in range(100):
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            print(epoch, i, loss.item())
            optimizer.zero_grad()  # zero the gradients
            loss.backward()        # backpropagation
            optimizer.step()       # update the parameters
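After training, the sigmoid output can be thresholded at 0.5 to get hard 0/1 predictions. A sketch of such a check (place it at the end of the if __name__ == '__main__': block; it reuses the training data only because these notes define no separate test split):

    with torch.no_grad():
        probs = model(dataset.x_data)               # probabilities in (0, 1)
        preds = (probs >= 0.5).float()              # threshold at 0.5
        acc = (preds == dataset.y_data).float().mean().item()
        print('training-set accuracy:', acc)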