If you don't have the dataset yet, just change download=False to download=True and it will be downloaded automatically.
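For example, the training-set line from train_test.py below would fetch CIFAR-10 on the first run when written like this (the only change is the download flag):

train_data = torchvision.datasets.CIFAR10("../data", train=True,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=True)  # downloads CIFAR-10 into ../data if it is not there yet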
Model structure: model.py

import torch
import torchvision
from torch import nn
from torch.nn import Sequential
from torch.utils.data import DataLoader


class classifier(nn.Module):
    def __init__(self):
        super().__init__()
        # CIFAR-10 images are 3x32x32; three conv + 2x2 max-pool blocks reduce them to 64x4x4,
        # which Flatten turns into 64 * 4 * 4 = 1024 features for the linear layers
        self.model1 = Sequential(
            nn.Conv2d(3, 32, 5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
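A quick way to verify that nn.Linear(1024, 64) really receives 1024 features is to push a dummy batch of CIFAR-sized images through the network and check the output shape. This is just a sanity-check sketch: appended to the bottom of model.py under an if __name__ == '__main__': guard it runs only when model.py is executed directly, not when train_test.py imports it, and the batch size 64 here is arbitrary.

if __name__ == '__main__':
    # Sanity check: a 3x32x32 input becomes 64x4x4 after the three MaxPool2d(2) layers,
    # so Flatten yields 1024 values per image and the final output has 10 class scores
    net = classifier()
    dummy = torch.ones((64, 3, 32, 32))  # a fake batch of 64 CIFAR-sized images
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([64, 10])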
Training and testing: train_test.py
from torch.utils.tensorboard import SummaryWriter
from model import *  # the star import also brings in torch, torchvision, nn and DataLoader from model.py

# Load the training and test sets
train_data = torchvision.datasets.CIFAR10("../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=False)
train_dataloader = DataLoader(train_data, batch_size=64)
test_data = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=False)
test_dataloader = DataLoader(test_data, batch_size=64)

# Instantiate the model, loss function, optimizer and TensorBoard writer
myClassifier = classifier()
loss = nn.CrossEntropyLoss()
optim = torch.optim.SGD(myClassifier.parameters(), lr=0.01)  # pass all of the model's parameters to the optimizer
writer = SummaryWriter("../logs_train_test")

# Training
epochs = 10
batch_step = 0
epoch_step = 0
for i in range(epochs):
    print("------------ Epoch {} starts ------------".format(i))
    for data in train_dataloader:
        imgs, targets = data
        output = myClassifier(imgs)
        result_loss = loss(output, targets)  # compute the loss; printed it looks like: tensor(2.3185, grad_fn=<NllLossBackward0>)
        optim.zero_grad()  # clear the gradients left over from the previous batch
        result_loss.backward()  # backpropagate this batch's loss; each parameter's gradient is stored on the parameter itself
        optim.step()  # update the parameters
        batch_step = batch_step + 1
        if batch_step % 100 == 0:  # every 100 parameter updates, print the current loss and log it for the plot
            print("After {} training steps, the training loss is: {}".format(batch_step, result_loss.item()))
            writer.add_scalar("train_loss", result_loss.item(), batch_step)

    # After each epoch, measure the loss on the test set
    total_testLoss = 0.0
    with torch.no_grad():  # no gradients are needed for evaluation
        for data in test_dataloader:
            imgs, targets = data
            output = myClassifier(imgs)
            # CrossEntropyLoss takes the raw logits (shape [N, 10]) and the integer class labels (shape [N])
            test_loss = loss(output, targets)
            total_testLoss = total_testLoss + test_loss.item()
    print("Test-set loss for this epoch: {}".format(total_testLoss))
    writer.add_scalar("test_loss", total_testLoss, epoch_step)
    epoch_step = epoch_step + 1

    # Save the model after every epoch
    torch.save(myClassifier, "myClassifier_{}.pth".format(epoch_step))
    print("Model saved")

writer.close()
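For anyone wondering why loss(output, targets) takes exactly those two arguments: nn.CrossEntropyLoss expects the raw logits with shape [batch, num_classes] as its first argument and the integer class indices with shape [batch] as its second; internally it applies log-softmax to the logits and averages the negative log-probability assigned to each target class. A minimal standalone sketch (the numbers are made up purely for illustration):

import torch
from torch import nn

loss_fn = nn.CrossEntropyLoss()
logits = torch.tensor([[2.0, 0.5, 0.1],
                       [0.2, 1.5, 0.3]])  # shape [2, 3]: 2 samples, 3 classes (raw scores, no softmax applied)
targets = torch.tensor([0, 2])            # shape [2]: the correct class index for each sample
# Equivalent by hand: mean over the batch of -log_softmax(logits)[i, targets[i]]
manual = -torch.log_softmax(logits, dim=1)[torch.arange(2), targets].mean()
print(loss_fn(logits, targets), manual)   # the two values match

One more note on saving: torch.save(myClassifier, ...) pickles the whole model object, so loading it later with torch.load requires the classifier class definition to be importable; saving myClassifier.state_dict() instead and restoring it with load_state_dict is the more portable option.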



