当前位置: 首页 > article >正文

小土堆学习笔记10(利用GPU训练与模型验证)

1.利用GPU训练

GPU可优化操作如下

可使用GPU的对象:数据(图像与标签)、损失函数、模型
方法1:先判断 torch.cuda.is_available(),如果可用则直接调用 .cuda()(如 model.cuda())
方法2:先设定 device,使用时直接调用 .to(device)(如 model.to(device))

1.1利用以前实战模型训练(经过完整测试最高到70%左右的正确率)

实战模型如下:

小土堆学习笔记5(sequential与小实战)-CSDN博客

具体代码如下:

class mymodel(Module):
    """CIFAR-10 classifier: three conv/pool stages followed by two linear layers."""

    def __init__(self):
        super().__init__()
        # Feature extractor; "same" padding keeps the spatial size, each pool halves it:
        # 32x32x3 -> 16x16x32 -> 8x8x32 -> 4x4x64.
        self.compose = Sequential(
            Conv2d(3, 32, kernel_size=5, padding="same"),
            MaxPool2d(2),
            Conv2d(32, 32, kernel_size=5, padding="same"),
            MaxPool2d(2),
            Conv2d(32, 64, kernel_size=5, padding="same"),
            MaxPool2d(2),
        )
        # Classifier head: flatten the 64*4*4 = 1024 features down to 10 class logits.
        self.linear = Sequential(
            Flatten(),
            Linear(64 * 4 * 4, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Return class logits for a batch of 3x32x32 images."""
        features = self.compose(x)
        return self.linear(features)

1.1.1方式1

完整代码如下:

import torch
import torchvision.datasets
from torch.nn import *
from torch.utils.data import DataLoader
# #绘图(方式1)
# import matplotlib.pyplot as plt
#绘图(tensorboard)
from torch.utils.tensorboard import SummaryWriter

#记录时间
import time

#tensorboard使用
waiter = SummaryWriter("model_logs")


# Dataset import.
# Training set (downloaded into ../data when missing).
train = torchvision.datasets.CIFAR10(root="../data",train=True,transform=torchvision.transforms.ToTensor(),download=True)
# Test set.
test = torchvision.datasets.CIFAR10(root="../data",train=False,transform=torchvision.transforms.ToTensor(),download=True)

# Dataset sizes.
train_len = len(train)
test_len = len(test)
# BUG FIX: the second label previously repeated "训练集长度为"; the second value is the test-set size.
print("训练集长度为:{0},\n测试集长度为:{1}".format(train_len,test_len))

# Wrap the datasets in DataLoaders (batching + shuffling).
train_dataloader = DataLoader(train,batch_size=64,shuffle=True)
test_dataloader = DataLoader(test,batch_size=64,shuffle=True)

#神经网络
class mymodel(Module):
    """CIFAR-10 CNN: three 5x5 conv + 2x2 max-pool stages, then two linear layers."""

    def __init__(self):
        super(mymodel,self).__init__()
        # Feature extractor; "same" padding keeps the spatial size, each pool halves it:
        # 32x32x3 -> 16x16x32 -> 8x8x32 -> 4x4x64.
        self.compose = Sequential(
            Conv2d(in_channels=3,out_channels=32,kernel_size=5,padding="same"),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32,out_channels=32,kernel_size=5,padding="same"),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32,out_channels=64,kernel_size=5,padding="same"),
            MaxPool2d(kernel_size=2)
        )

        # Classifier head: flatten the 64*4*4 = 1024 features down to 10 class logits.
        self.linear = Sequential(
            Flatten(),
            Linear(in_features=64*4*4,out_features=64),
            Linear(in_features=64,out_features=10)
        )
    def forward(self,x):
        """Return class logits for a batch of 3x32x32 images."""
        output = self.compose(x)
        output = self.linear(output)
        return output



# Model instantiation.
model = mymodel()
# GPU usage: move the model onto the GPU when one is available.
if torch.cuda.is_available():
    model = model.cuda()


# Loss function.
Loss_Cal = CrossEntropyLoss()
# The loss module is moved onto the GPU as well.
if torch.cuda.is_available():
    Loss_Cal = Loss_Cal.cuda()

# Optimizer.
# Learning rate.
learn_rate = 1e-2
# Optimizer construction (plain SGD).
optiom = torch.optim.SGD(model.parameters(),lr=learn_rate)

# Number of training steps taken so far.
total_train_step = 0
# Number of evaluation passes taken so far.
total_test_step = 0
# Number of epochs.
epoch = 10

start_time = time.time()






# Main loop: one full pass over the training set per epoch,
# followed by a full evaluation pass over the test set.
for i in range(epoch):
    print("---------第 {} 轮训练开始---------\n".format(i+1),end="")
    # Training phase.
    # model.train()  # optional here: it only affects modules such as Dropout/BatchNorm
    for data in train_dataloader:
        img,target = data
        if torch.cuda.is_available():
            img = img.cuda() # move the batch onto the GPU as well
            target = target.cuda()
        out_img = model(img)
        # Loss computation.
        Loss = Loss_Cal(out_img,target)
        # Optimizer usage:
        # clear the gradients accumulated from the previous batch
        optiom.zero_grad()
        # backpropagate to compute fresh gradients
        Loss.backward()
        # apply the gradient update
        optiom.step()
        # one more training step done
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time-start_time)
            print("训练次数:{0},Loss值为:{1:.5f}".format(total_train_step,Loss.item()))
            waiter.add_scalar("train_loss",Loss.item(),total_train_step)
    # Test-set evaluation.
    # model.eval()  # optional here: it only affects modules such as Dropout/BatchNorm
    total_loss = 0
    # Total number of correct predictions on the test set.
    total_accurate = 0
    # Disable gradient tracking during evaluation.
    with torch.no_grad():
        for data in test_dataloader:
            img,target = data
            if torch.cuda.is_available():
                img = img.cuda()  # move the batch onto the GPU as well
                target = target.cuda()
            out = model(img)
            test_loss = Loss_Cal(out,target)
            total_loss = total_loss + test_loss.item()
            # Correct predictions in this batch (argmax over the class axis).
            accurate = (out.argmax(1) == target).sum()
            total_accurate = total_accurate + accurate

    print("测试集整体误差为:{}".format(total_loss))
    print("测试集整体正确率为:{0:.2f}%".format(total_accurate*100/test_len))
    waiter.add_scalar("test_loss", total_loss, total_test_step)
    waiter.add_scalar("test_loss_rate", total_accurate*100/test_len, total_test_step)

    total_test_step = total_test_step + 1
    # Save the whole model every epoch (way 1).
    torch.save(model,"model{}.pth".format(i))
    # Way 2: save only the state dict.
    # torch.save(model.state_dict(),"model{}.pth".format(i))
    print("模型已保存")

waiter.close()


重要关注点:

模型实例化

#模型实例化
model = mymodel()
#GPU的使用
if torch.cuda.is_available():
    model = model.cuda()

训练集图像取出

        img,target = data
        if torch.cuda.is_available():
            img = img.cuda() #这里也有
            target = target.cuda()

测试集图像取出

    #在没有梯度情况下使用
    with torch.no_grad():
        for data in test_dataloader:
            img,target = data
            if torch.cuda.is_available():
                img = img.cuda()  # 这里也有
                target = target.cuda()

 1.1.2 方式2

完整代码

import torch
import torchvision.datasets
from torch.nn import *
from torch.utils.data import DataLoader

from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
 # 记录时间
import time
#神经网络
class mymodel(Module):
    """CIFAR-10 CNN: three 5x5 conv + 2x2 max-pool stages, then two linear layers."""

    def __init__(self):
        super(mymodel,self).__init__()
        # Feature extractor; "same" padding keeps the spatial size, each pool halves it:
        # 32x32x3 -> 16x16x32 -> 8x8x32 -> 4x4x64.
        self.compose = Sequential(
            Conv2d(in_channels=3,out_channels=32,kernel_size=5,padding="same"),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32,out_channels=32,kernel_size=5,padding="same"),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32,out_channels=64,kernel_size=5,padding="same"),
            MaxPool2d(kernel_size=2)
        )
        # Classifier head: flatten the 64*4*4 = 1024 features down to 10 class logits.
        self.linear = Sequential(
            # ReLU(inplace=True),
            Flatten(),
            Linear(in_features=64*4*4,out_features=64),
            Linear(in_features=64,out_features=10)
        )
    def forward(self,x):
        """Return class logits for a batch of 3x32x32 images."""
        output = self.compose(x)
        output = self.linear(output)
        return output
    transform_train = transforms.Compose([
        # transforms.RandomHorizontalFlip(),  # 随机水平翻转
        # transforms.RandomVerticalFlip(),#随机竖直翻转
        transforms.ToTensor(),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])



    # 定义训练模型的设备
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    '''
        仅有数据获取和损失函数可以用GPU
    '''

    # tensorboard使用
    # waiter = SummaryWriter("model_logs")

    # 数据集的导入
    # 训练集
    train = torchvision.datasets.CIFAR10(root="../data", train=True, transform=transform_train, download=True)
    # 测试集
    test = torchvision.datasets.CIFAR10(root="../data", train=False, transform=transform_test, download=True)

    # 数据集长度获取
    train_len = len(train)
    test_len = len(test)
    print("训练集长度为:{0},\n训练集长度为:{1}".format(train_len, test_len))

    # 载入数据集(数据预处理)
    train_dataloader = DataLoader(train, batch_size=64, shuffle=True)
    test_dataloader = DataLoader(test, batch_size=64, shuffle=True)

    # 模型实例化
    model = mymodel()

    # GPU的使用
    model = model.to(device)

    # 模型参数导入
    # model_dict = torch.load("model3.pth",weights_only=False)
    # model.load_state_dict(model_dict)

    # 损失函数
    Loss_Cal = CrossEntropyLoss()
    # 这个也有GPU
    Loss_Cal = Loss_Cal.to(device)

    # 优化器
    # 学习速率
    learn_rate = 1e-2
    # 优化器构建
    optiom = torch.optim.SGD(model.parameters(), lr=learn_rate)

    # 训练的次数
    total_train_step = 0
    # 测试的次数
    total_test_step = 0
    # 训练轮数
    epoch = 200

    start_time = time.time()

    for i in range(epoch):
        print("---------第 {} 轮训练开始---------\n".format(i + 1), end="")
        # 训练集训练
        # model.train()(这仅对某些模块有影响。请参阅特定模块的文档,以了解它们在训练/评估模式下的行为,)#(可选,用于模型中有Dropout, BatchNorm, etc.中)
        train_right = 0

        for data in train_dataloader:
            img, target = data
            img = img.to(device)  # 这里也有
            target = target.to(device)
            out_img = model(img)
            # 损失函数计算
            Loss = Loss_Cal(out_img, target)
            # 优化器使用
            # 梯度清零
            optiom.zero_grad()
            # 反向传播(梯度获取)
            Loss.backward()
            # 梯度优化
            optiom.step()
            # 训练次数加1
            total_train_step = total_train_step + 1

            train_right_once = (out_img.argmax(1) == target).sum()
            train_right = train_right + train_right_once

            if total_train_step % 100 == 0:
                if total_train_step == 100:
                    end_time = time.time()
                    print(end_time - start_time)
                    print("训练次数:{0},Loss值为:{1:.5f}".format(total_train_step, Loss.item()))
                # waiter.add_scalar("train_loss",Loss.item(),total_train_step)

        print("训练集整体正确率为:{0:.2f}%".format(train_right * 100 / train_len))
        # 测试集误差
        # model.eval()#(可选,用于模型中有Dropout, BatchNorm, etc.中)
        total_loss = 0
        # 测试集整体正确个数
        total_accurate = 0
        # 在没有梯度情况下使用
        with torch.no_grad():
            for data in test_dataloader:
                img, target = data
                img = img.to(device)
                target = target.to(device)
                out = model(img)
                test_loss = Loss_Cal(out, target)
                total_loss = total_loss + test_loss.item()
                # 每次正确个数(横向看)(用输出的argmax可以知道)
                accurate = (out.argmax(1) == target).sum()
                total_accurate = total_accurate + accurate

        # print("测试集整体误差为:{}".format(total_loss))
        print("测试集整体正确率为:{0:.2f}%".format(total_accurate * 100 / test_len))
        # waiter.add_scalar("test_loss", total_loss, total_test_step)
        # waiter.add_scalar("test_loss_rate", total_accurate*100/test_len, total_test_step)

        total_test_step = total_test_step + 1
        if (total_accurate * 100 / test_len) > 80:
            torch.save(model.state_dict(), "model80.pth")
            print("模型已保存")
            exit(0)

注意要点:

device赋值

    # 定义训练模型的设备
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

模型实例化

    # 模型实例化
    model = mymodel()

    # GPU的使用
    model = model.to(device)

 训练集与测试集数据取出

        for data in train_dataloader:
            img, target = data
            img = img.to(device)  # 这里也有
            target = target.to(device)

        with torch.no_grad():
            for data in test_dataloader:
                img, target = data
                img = img.to(device)
                target = target.to(device)

1.2利用VGG16训练(经过测试最多89%一般会卡在81-82%)

完整代码如下:

import torch
import torchvision.datasets
from PIL import Image
from torch.nn import *
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

from torch.utils.tensorboard import SummaryWriter

#记录时间
import time

from torchvision import transforms
from torchvision.transforms import Resize, ToTensor

#定义训练模型的设备
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
'''
    仅有数据获取和损失函数可以用GPU
'''

# 数据集的导入及数据增强
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # 随机裁剪
    transforms.RandomHorizontalFlip(),  # 随机水平翻转
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # 归一化
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])


# Dataset import.
# Training set (with augmentation transform).
train = torchvision.datasets.CIFAR10(root="../data",train=True,transform=transform_train,download=True)
# Test set.
test = torchvision.datasets.CIFAR10(root="../data",train=False,transform=transform_test,download=True)

# Dataset sizes.
train_len = len(train)
test_len = len(test)
# BUG FIX: the second label previously repeated "训练集长度为"; the second value is the test-set size.
print("训练集长度为:{0},\n测试集长度为:{1}".format(train_len,test_len))

# Wrap the datasets in DataLoaders (batching + shuffling).
train_dataloader = DataLoader(train,batch_size=100,shuffle=True)
test_dataloader = DataLoader(test,batch_size=100,shuffle=True)




#模型实例化
model = torchvision.models.vgg16()
model.features[1] = Sigmoid()

model.classifier.add_module("7",Linear(in_features=1000,out_features=10))



#GPU的使用
model = model.to(device)



#损失函数
Loss_Cal = CrossEntropyLoss()
#这个也有GPU
Loss_Cal = Loss_Cal.to(device)

#优化器
#学习速率
learn_rate = 1e-4

# 优化器构建
optiom = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=1e-4)  # 使用Adam优化器并添加L2正则化

# 学习率衰减
scheduler = StepLR(optiom, step_size=20, gamma=0.1)

#训练的次数
total_train_step = 0
#测试的次数
total_test_step = 0
#训练轮数
epoch = 80

start_time = time.time()

for i in range(epoch):
    print("---------第 {} 轮训练开始---------\n".format(i+1),end="")
    train_right = 0
    # Training phase. train()/eval() matter here because VGG16 contains Dropout layers.
    model.train()
    for data in train_dataloader:
        img,target = data
        img = img.to(device) # move the batch onto the device as well
        target = target.to(device)
        out_img = model(img)
        # Loss computation.
        Loss = Loss_Cal(out_img,target)
        # Optimizer step: clear old gradients, backpropagate, update the weights.
        optiom.zero_grad()
        Loss.backward()
        optiom.step()
        # One more training step done.
        total_train_step = total_train_step + 1

        train_right_once = (out_img.argmax(1) == target).sum()
        train_right = train_right + train_right_once
        if total_train_step % 100 == 0:
            end_time = time.time()
            # BUG FIX: this inner `if` was mis-indented in the original (syntax error).
            if total_train_step == 100:
                print(end_time-start_time)
            # print("训练次数:{0},Loss值为:{1:.5f}".format(total_train_step,Loss.item()))

    # BUG FIX: the StepLR scheduler was created but never stepped, so the learning
    # rate never decayed; advance it once per epoch.
    scheduler.step()

    print("训练集整体正确率为:{0:.2f}%".format(train_right * 100 / train_len))
    total_loss = 0
    # Total number of correct predictions on the test set.
    total_accurate = 0
    # Evaluation phase: Dropout off; no gradient tracking (the original comment
    # claimed no-grad but the context manager was missing).
    model.eval()
    with torch.no_grad():
        for data in test_dataloader:
            img,target = data
            img = img.to(device)
            target = target.to(device)
            out = model(img)
            test_loss = Loss_Cal(out,target)
            total_loss = total_loss + test_loss.item()
            # Correct predictions in this batch (argmax over the class axis).
            accurate = (out.argmax(1) == target).sum()
            total_accurate = total_accurate + accurate

    # print("测试集整体误差为:{}".format(total_loss))
    print("测试集整体正确率为:{0:.2f}%".format(total_accurate*100/test_len))

    total_test_step = total_test_step + 1
    # BUG FIX: `torch. Save` was a typo (AttributeError at runtime), and the >90
    # branch was unreachable because >80 matched first; test the higher bar first.
    if (total_accurate*100/test_len)>90:
        torch.save(model.state_dict(), "model_accurate_rate90.pth")
        break
    elif (total_accurate*100/test_len)>80:
        torch.save(model.state_dict(),"model_accurate_rate80.pth")
        break

成果展现:

---------第 60 轮训练开始---------
训练次数:29600,Loss值为:0.10109
训练次数:29700,Loss值为:0.11309
训练次数:29800,Loss值为:0.03813
训练次数:29900,Loss值为:0.11464
训练次数:30000,Loss值为:0.24277
训练集整体正确率为:95.81%
测试集整体误差为:65.32272988557816
测试集整体正确率为:83.37%
---------第 61 轮训练开始---------
训练次数:30100,Loss值为:0.17496
训练次数:30200,Loss值为:0.04714
训练次数:30300,Loss值为:0.15686
训练次数:30400,Loss值为:0.04534
训练次数:30500,Loss值为:0.24100
训练集整体正确率为:96.00%
测试集整体误差为:72.67696017026901
测试集整体正确率为:82.71%
---------第 62 轮训练开始---------
训练次数:30600,Loss值为:0.13827
训练次数:30700,Loss值为:0.09803
训练次数:30800,Loss值为:0.12379
训练次数:30900,Loss值为:0.04453
训练次数:31000,Loss值为:0.05138
训练集整体正确率为:95.76%
测试集整体误差为:66.17632550001144
测试集整体正确率为:84.10%
---------第 63 轮训练开始---------
训练次数:31100,Loss值为:0.08484
训练次数:31200,Loss值为:0.15044
训练次数:31300,Loss值为:0.13587
训练次数:31400,Loss值为:0.13113
训练次数:31500,Loss值为:0.06151
训练集整体正确率为:96.05%
测试集整体误差为:66.87014165520668
测试集整体正确率为:83.38%
---------第 64 轮训练开始---------
训练次数:31600,Loss值为:0.13451
训练次数:31700,Loss值为:0.17018
训练次数:31800,Loss值为:0.11295
训练次数:31900,Loss值为:0.20130
训练次数:32000,Loss值为:0.23258
训练集整体正确率为:96.14%
测试集整体误差为:66.570760846138
测试集整体正确率为:82.62%
---------第 65 轮训练开始---------
训练次数:32100,Loss值为:0.12391
训练次数:32200,Loss值为:0.03606
训练次数:32300,Loss值为:0.12146
训练次数:32400,Loss值为:0.15146
训练次数:32500,Loss值为:0.13847
训练集整体正确率为:96.33%
测试集整体误差为:66.38061390817165
测试集整体正确率为:83.74%
---------第 66 轮训练开始---------
训练次数:32600,Loss值为:0.11775
训练次数:32700,Loss值为:0.18038
训练次数:32800,Loss值为:0.10574
训练次数:32900,Loss值为:0.11084
训练次数:33000,Loss值为:0.13767
训练集整体正确率为:96.49%
测试集整体误差为:69.21106803417206
测试集整体正确率为:84.46%
---------第 67 轮训练开始---------
训练次数:33100,Loss值为:0.09896
训练次数:33200,Loss值为:0.12010
训练次数:33300,Loss值为:0.08347
训练次数:33400,Loss值为:0.08270
训练次数:33500,Loss值为:0.18042
训练集整体正确率为:96.29%
测试集整体误差为:71.32115119695663
测试集整体正确率为:83.52%
---------第 68 轮训练开始---------
训练次数:33600,Loss值为:0.13187
训练次数:33700,Loss值为:0.07140
训练次数:33800,Loss值为:0.02574
训练次数:33900,Loss值为:0.05127
训练次数:34000,Loss值为:0.11608
训练集整体正确率为:96.50%
测试集整体误差为:66.48537555336952
测试集整体正确率为:83.72%
---------第 69 轮训练开始---------
训练次数:34100,Loss值为:0.05971
训练次数:34200,Loss值为:0.19054
训练次数:34300,Loss值为:0.10288
训练次数:34400,Loss值为:0.06640
训练次数:34500,Loss值为:0.18923
训练集整体正确率为:96.50%
测试集整体误差为:65.59952509403229
测试集整体正确率为:84.09%
---------第 70 轮训练开始---------
训练次数:34600,Loss值为:0.06690
训练次数:34700,Loss值为:0.09345
训练次数:34800,Loss值为:0.08450
训练次数:34900,Loss值为:0.09504
训练次数:35000,Loss值为:0.06995
训练集整体正确率为:96.73%
测试集整体误差为:71.92208224534988
测试集整体正确率为:83.46%
---------第 71 轮训练开始---------
训练次数:35100,Loss值为:0.07069
训练次数:35200,Loss值为:0.15355
训练次数:35300,Loss值为:0.10830
训练次数:35400,Loss值为:0.03445
训练次数:35500,Loss值为:0.11961
训练集整体正确率为:96.76%
测试集整体误差为:70.81322455406189
测试集整体正确率为:83.42%
---------第 72 轮训练开始---------
训练次数:35600,Loss值为:0.03358
训练次数:35700,Loss值为:0.18448
训练次数:35800,Loss值为:0.11305
训练次数:35900,Loss值为:0.13451
训练次数:36000,Loss值为:0.02999
训练集整体正确率为:96.85%
测试集整体误差为:75.76497927308083
测试集整体正确率为:83.52%
---------第 73 轮训练开始---------
训练次数:36100,Loss值为:0.17219
训练次数:36200,Loss值为:0.04997
训练次数:36300,Loss值为:0.09683
训练次数:36400,Loss值为:0.11514
训练次数:36500,Loss值为:0.12263
训练集整体正确率为:96.66%
测试集整体误差为:69.5647574365139
测试集整体正确率为:83.84%

相关说明:

可以自行打印vgg16的神经网络发现有dropout,所以要用model.eval和model.train

如下:

VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
    (7): Linear(in_features=1000, out_features=10, bias=True)
  )
)

1.2.1改变之处

  • 加入了图像预处理防止过拟合
# 数据集的导入及数据增强
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # 随机裁剪
    transforms.RandomHorizontalFlip(),  # 随机水平翻转
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # 归一化
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
  • 模型实例化更改
    #模型实例化
    model = torchvision.models.vgg16()
    model.features[1] = Sigmoid()
    
    model.classifier.add_module("7",Linear(in_features=1000,out_features=10))
  •  模型训练与测试不同:
model.train()#(这仅对某些模块有影响。请参阅特定模块的文档,以了解它们在训练/评估模式下的行为,)#(可选,用于模型中有Dropout, BatchNorm, etc.中)
    
 model.eval()  # (可选,用于模型中有Dropout, BatchNorm, etc.中)

优化器更改(为了更快一点)

#优化器
#学习速率
learn_rate = 1e-4

# 优化器构建
optiom = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=1e-4)  # 使用Adam优化器并添加L2正则化

如果要让VGG16快一点可以加入一点点东西

例如1:

模型实例化中加入 weights='DEFAULT'(加载预训练权重)

model = torchvision.models.vgg16(weights='DEFAULT')

例如2:

学习速率变大(由 1e-4 调整为 1e-3,与下方加速后的完整代码一致)

#优化器
#学习速率
learn_rate = 1e-3

速度加快后全代码如下(请确保您可以有效访问相关网络):

import os

import torch
import torchvision.datasets
from PIL import Image
from torch.nn import *
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

from torch.utils.tensorboard import SummaryWriter

#记录时间
import time

from torchvision import transforms
from torchvision.transforms import Resize, ToTensor

#定义训练模型的设备
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
'''
    仅有数据获取和损失函数可以用GPU
'''

# 数据集的导入及数据增强
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # 随机裁剪
    transforms.RandomHorizontalFlip(),  # 随机水平翻转
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # 归一化
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])


#数据集的导入
#训练集
train = torchvision.datasets.CIFAR10(root="../data",train=True,transform=transform_train,download=True)
#测试集
test = torchvision.datasets.CIFAR10(root="../data",train=False,transform=transform_test,download=True)

#数据集长度获取
train_len = len(train)
test_len = len(test)
print("训练集长度为:{0},\n训练集长度为:{1}".format(train_len,test_len))

#载入数据集(数据预处理)
train_dataloader = DataLoader(train,batch_size=100,shuffle=True)
test_dataloader = DataLoader(test,batch_size=100,shuffle=True)




#模型实例化
model = torchvision.models.vgg16(weights='DEFAULT')
model.features[1] = Sigmoid()

model.classifier.add_module("7",Linear(in_features=1000,out_features=10))
#上次模型参数加载
# model_dict = torch.load("model_accurate_rate80_vgg.pth", weights_only=False)
# model.load_state_dict(model_dict)


#GPU的使用
model = model.to(device)



#损失函数
Loss_Cal = CrossEntropyLoss()
#这个也有GPU
Loss_Cal = Loss_Cal.to(device)

#优化器
#学习速率
learn_rate = 1e-3

# 优化器构建
optiom = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=1e-4)  # 使用Adam优化器并添加L2正则化

# 学习率衰减
scheduler = StepLR(optiom, step_size=20, gamma=0.1)

#训练的次数
total_train_step = 0
#测试的次数
total_test_step = 0
#训练轮数
epoch = 80

start_time = time.time()

for i in range(epoch):
    print("---------第 {} 轮训练开始---------\n".format(i+1),end="")
    train_right = 0
    # Training phase. train()/eval() matter here because VGG16 contains Dropout layers.
    model.train()
    for data in train_dataloader:
        img,target = data
        img = img.to(device) # move the batch onto the device as well
        target = target.to(device)
        out_img = model(img)
        # Loss computation.
        Loss = Loss_Cal(out_img,target)
        # Optimizer step: clear old gradients, backpropagate, update the weights.
        optiom.zero_grad()
        Loss.backward()
        optiom.step()
        # One more training step done.
        total_train_step = total_train_step + 1

        train_right_once = (out_img.argmax(1) == target).sum()
        train_right = train_right + train_right_once
        if total_train_step % 100 == 0:
            end_time = time.time()
            # Only report the elapsed time once, at step 100.
            if total_train_step == 100:
                print(end_time-start_time)
            # print("训练次数:{0},Loss值为:{1:.5f}".format(total_train_step,Loss.item()))

    # BUG FIX: the StepLR scheduler was created but never stepped, so the learning
    # rate never decayed; advance it once per epoch.
    scheduler.step()

    print("训练集整体正确率为:{0:.2f}%".format(train_right * 100 / train_len))
    total_loss = 0
    # Total number of correct predictions on the test set.
    total_accurate = 0
    # Evaluation phase: Dropout off; no gradient tracking (the original comment
    # claimed no-grad but the context manager was missing).
    model.eval()
    with torch.no_grad():
        for data in test_dataloader:
            img,target = data
            img = img.to(device)
            target = target.to(device)
            out = model(img)
            test_loss = Loss_Cal(out,target)
            total_loss = total_loss + test_loss.item()
            # Correct predictions in this batch (argmax over the class axis).
            accurate = (out.argmax(1) == target).sum()
            total_accurate = total_accurate + accurate

    # print("测试集整体误差为:{}".format(total_loss))
    print("测试集整体正确率为:{0:.2f}%".format(total_accurate*100/test_len))

    total_test_step = total_test_step + 1
    # BUG FIX: `torch. Save` was a typo (AttributeError at runtime).
    if (total_accurate*100/test_len)>88:
        torch.save(model.state_dict(),"model_accurate_rate88.pth")
        break

2.模型测试

完整代码如下:

import os.path
import torch
import torchvision
from PIL import Image

from torch.nn import *
from torchvision.transforms import Resize, ToTensor


test = torchvision.datasets.CIFAR10(root="../data",train=False)
dictk=test.class_to_idx

total_img_path = []

class mymodel(Module):
    """CIFAR-10 classifier matching the trained checkpoint: conv/Sigmoid/pool stages plus a linear head."""

    def __init__(self):
        super().__init__()
        # Feature extractor with a Sigmoid activation after each convolution;
        # "same" padding keeps the spatial size, each pool halves it: 32x32x3 -> 4x4x64.
        self.compose = Sequential(
            Conv2d(3, 32, kernel_size=5, padding="same"),
            Sigmoid(),
            MaxPool2d(2),
            Conv2d(32, 32, kernel_size=5, padding="same"),
            Sigmoid(),
            MaxPool2d(2),
            Conv2d(32, 64, kernel_size=5, padding="same"),
            Sigmoid(),
            MaxPool2d(2),
        )

        # Classifier head: flatten the 64*4*4 = 1024 features down to 10 class logits.
        self.linear = Sequential(
            Flatten(),
            Linear(64 * 4 * 4, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Return class logits for a batch of 3x32x32 images."""
        return self.linear(self.compose(x))

# Image loading.
root_path = "../image"
# Collect the paths ../image/1.png ... ../image/12.png.
for i in range(12):
    image_path = f"{i+1}.png"
    total_img_path.append(os.path.join(root_path,image_path))
print(total_img_path)
# Image preprocessing: resize to the 32x32 input size the model expects, then convert to tensor.
transforms = torchvision.transforms.Compose([
    Resize([32,32]),
    ToTensor()
])


# Model loading.
# Instantiate the network.
model = mymodel()
# model = torchvision.models.vgg16()
# model.classifier.add_module("7",Linear(in_features=1000,out_features=10))
# Load the trained parameters (a state dict saved by the training script).
model_dict = torch.load("model80.pth", weights_only=False)
model.load_state_dict(model_dict)

def get_key(val):
    """Reverse lookup in dictk: return the class name mapped to index *val*."""
    for name, idx in dictk.items():
        if idx == val:
            return name
    # No class has this index.
    return "There is no such Key"
print(dictk)
# Run the model on each of the 12 images and print the predicted class name.
for i in range(12):
    image = Image.open(total_img_path[i])
    img_input = transforms(image)
    # Add the batch dimension: (3, 32, 32) -> (1, 3, 32, 32).
    img_input = torch.reshape(img_input,[1,3,32,32])
    model.eval()
    out = model(img_input)
    # Index of the highest logit = predicted class id.
    out = out.argmax(1)
    print(get_key(out.item()),end="---")

2.1代码详解

test = torchvision.datasets.CIFAR10(root="../data",train=False)
dictk=test.class_to_idx

这两句是为了获取那个字典类型,就是为了获取airplane,dog,cat等具体分类类别

class mymodel(Module):
    def __init__(self):
        super(mymodel,self).__init__()
        self.compose = Sequential(
            Conv2d(in_channels=3,out_channels=32,kernel_size=5,padding="same"),
            Sigmoid(),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32,out_channels=32,kernel_size=5,padding="same"),
            Sigmoid(),
            MaxPool2d(kernel_size=2),
            Conv2d(in_channels=32,out_channels=64,kernel_size=5,padding="same"),
            Sigmoid(),
            MaxPool2d(kernel_size=2)
        )

        self.linear = Sequential(
            # ReLU(inplace=True),
            Flatten(),
            Linear(in_features=64*4*4,out_features=64),
            Linear(in_features=64,out_features=10)
        )
    def forward(self,x):
        output = self.compose(x)
        output = self.linear(output)
        return output

上面是神经网络

#图像导入
root_path = "../image"
for i in range(12):
    image_path = f"{i+1}.png"
    total_img_path.append(os.path.join(root_path,image_path))
print(total_img_path)

这个是每张照片的路径获取

建议建立一个新文件加名字叫image,然后图片命名为1.png,2.png...

#图像处理
transforms = torchvision.transforms.Compose([
    Resize([32,32]),
    ToTensor()
])

图像预处理为了适配模型

#模型实例化
model = mymodel()
# model = torchvision.models.vgg16()
# model.classifier.add_module("7",Linear(in_features=1000,out_features=10))
#参数导入
model_dict = torch.load("model80.pth", weights_only=False)
model.load_state_dict(model_dict)

模型实例化与模型参数导入

def get_key(val):
    for key, value in dictk.items():
        if val == value:
            return key

    return "There is no such Key"

依据字典的Value寻找key

for i in range(12):
    image = Image.open(total_img_path[i])
    img_input = transforms(image)
    img_input = torch.reshape(img_input,[1,3,32,32])
    model.eval()
    out = model(img_input)
    out = out.argmax(1)
    print(get_key(out.item()),end="---")

遍历每一张图片运行模型,输出最终结果(以airplane的形式输出)

具体实例以VGG16为例(需要网络版):

import os

import torch
import torchvision.datasets
from PIL import Image
from torch.nn import *
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

from torch.utils.tensorboard import SummaryWriter

#记录时间
import time

from torchvision import transforms
from torchvision.transforms import Resize, ToTensor

#定义训练模型的设备
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
'''
    仅有数据获取和损失函数可以用GPU
'''

# 数据集的导入及数据增强
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # 随机裁剪
    transforms.RandomHorizontalFlip(),  # 随机水平翻转
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # 归一化
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])


#数据集的导入
#训练集
train = torchvision.datasets.CIFAR10(root="../data",train=True,transform=transform_train,download=True)
#测试集
test = torchvision.datasets.CIFAR10(root="../data",train=False,transform=transform_test,download=True)

#数据集长度获取
train_len = len(train)
test_len = len(test)
print("训练集长度为:{0},\n训练集长度为:{1}".format(train_len,test_len))

#载入数据集(数据预处理)
train_dataloader = DataLoader(train,batch_size=100,shuffle=True)
test_dataloader = DataLoader(test,batch_size=100,shuffle=True)




# Instantiate the network: torchvision VGG16 with pretrained ImageNet weights.
model = torchvision.models.vgg16(weights='DEFAULT')
# Replace the activation at features[1] with Sigmoid.
model.features[1] = Sigmoid()

# VGG16 ends in a 1000-way classifier; append one more Linear layer to map the
# 1000 ImageNet logits down to CIFAR-10's 10 classes.
model.classifier.add_module("7",Linear(in_features=1000,out_features=10))
# Reload parameters from a previous run (only valid from the second run on,
# once the checkpoint file exists).
# model_dict = torch.load("model_accurate_rate88.pth", weights_only=False)
# model.load_state_dict(model_dict)


# Move the model to the selected device (GPU when available).
model = model.to(device)



# Loss function: cross-entropy over the 10 class logits.
Loss_Cal = CrossEntropyLoss()
# The loss module is moved to the same device as the model and data.
Loss_Cal = Loss_Cal.to(device)

# Optimizer
# Learning rate
learn_rate = 1e-4

# Adam optimizer; weight_decay adds L2 regularization.
optiom = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=1e-4)

# Learning-rate decay: multiply the lr by 0.1 every 20 scheduler steps.
scheduler = StepLR(optiom, step_size=20, gamma=0.1)

# Number of optimizer steps taken so far
total_train_step = 0
# Number of completed evaluation passes
total_test_step = 0
# Maximum number of training epochs
epoch = 80

# Wall-clock reference used to time the first 100 training steps.
start_time = time.time()

# Main loop: one training pass + one full test-set evaluation per epoch.
# Stops early (saving a checkpoint) once test accuracy exceeds 88%.
for i in range(epoch):
    print("---------第 {} 轮训练开始---------\n".format(i+1),end="")
    train_right = 0
    # Training phase: enables train-mode behavior of Dropout/BatchNorm layers.
    model.train()
    for data in train_dataloader:
        img, target = data
        img = img.to(device)      # batches must be on the same device as the model
        target = target.to(device)
        out_img = model(img)
        # Loss for this batch
        Loss = Loss_Cal(out_img, target)
        # Optimizer step: clear old gradients, backprop, update parameters.
        optiom.zero_grad()
        Loss.backward()
        optiom.step()
        total_train_step = total_train_step + 1

        # Correct predictions in this batch; .item() keeps the counter a plain int.
        train_right = train_right + (out_img.argmax(1) == target).sum().item()
        if total_train_step % 100 == 0:
            end_time = time.time()
            if total_train_step == 100:
                print("训练100次所用时间:{0}".format(end_time-start_time))
            # print("训练次数:{0},Loss值为:{1:.5f}".format(total_train_step,Loss.item()))

    print("训练集整体正确率为:{0:.2f}%".format(train_right * 100 / train_len))

    # Test-set evaluation
    total_loss = 0
    # Total correct predictions over the whole test set
    total_accurate = 0
    model.eval()  # eval-mode behavior for Dropout/BatchNorm layers
    # BUGFIX: evaluation previously ran with autograd enabled despite the comment
    # claiming otherwise; no_grad() skips graph construction, saving time/memory.
    with torch.no_grad():
        for data in test_dataloader:
            img, target = data
            img = img.to(device)
            target = target.to(device)
            out = model(img)
            test_loss = Loss_Cal(out, target)
            total_loss = total_loss + test_loss.item()
            # Per-batch correct count via argmax over the class dimension.
            total_accurate = total_accurate + (out.argmax(1) == target).sum().item()

    # print("测试集整体误差为:{}".format(total_loss))
    print("测试集整体正确率为:{0:.2f}%".format(total_accurate*100/test_len))

    total_test_step = total_test_step + 1

    # BUGFIX: the StepLR scheduler was created but never stepped, so the learning
    # rate never actually decayed; advance it once per epoch.
    scheduler.step()

    # Early stop with checkpoint once test accuracy passes 88%.
    if (total_accurate*100/test_len) > 88:
        torch.save(model.state_dict(), "model_accurate_rate88.pth")
        break


if __name__ == "__main__":
    import os  # BUGFIX: `os` is used below but was never imported in this script

    # Class-name -> index mapping from the CIFAR-10 dataset.
    dictk = test.class_to_idx

    total_img_path = []

    # Collect the 12 sample image paths: ../image/1.png ... ../image/12.png
    root_path = "../image"
    for i in range(12):
        image_path = f"{i + 1}.png"
        total_img_path.append(os.path.join(root_path, image_path))
    print(total_img_path)

    # Preprocessing must mirror the *test* transform used during training:
    # resize to 32x32, to-tensor, then the same CIFAR-10 normalization.
    # (Renamed from `transforms` to stop shadowing the torchvision.transforms module.)
    preprocess = torchvision.transforms.Compose([
        Resize([32, 32]),
        ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    # Rebuild the model and load the saved parameters.
    model = torchvision.models.vgg16()
    # BUGFIX: the checkpoint was trained with Sigmoid at features[1]; omitting it
    # still loads silently (Sigmoid has no parameters) but changes the forward
    # pass and degrades predictions.
    model.features[1] = Sigmoid()
    model.classifier.add_module("7", Linear(in_features=1000, out_features=10))
    model_dict = torch.load("model_accurate_rate88.pth", weights_only=False)
    model.load_state_dict(model_dict)
    model.eval()  # hoisted out of the loop; the mode never changes per image


    def get_key(val):
        """Return the class name mapped to index *val*, or a not-found message."""
        for key, value in dictk.items():
            if val == value:
                return key
        return "There is no such Key"


    print(dictk)
    print("正确概率5cat,2airplane,2dog,3ship")
    with torch.no_grad():
        for i in range(12):
            # BUGFIX: PNGs may carry an alpha channel; convert to 3-channel RGB
            # so the reshape to [1, 3, 32, 32] cannot fail.
            image = Image.open(total_img_path[i]).convert("RGB")
            img_input = preprocess(image)
            img_input = torch.reshape(img_input, [1, 3, 32, 32])
            out = model(img_input)
            print(get_key(out.argmax(1).item()), end="---")

成果展现:

cuda
Files already downloaded and verified
Files already downloaded and verified
训练集长度为:50000,
训练集长度为:10000
---------第 1 轮训练开始---------
训练100次所用时间:13.613473653793335
训练集整体正确率为:71.94%
测试集整体正确率为:79.89%
---------第 2 轮训练开始---------
训练集整体正确率为:82.76%
测试集整体正确率为:84.29%
---------第 3 轮训练开始---------
训练集整体正确率为:85.84%
测试集整体正确率为:85.29%
---------第 4 轮训练开始---------
训练集整体正确率为:87.93%
测试集整体正确率为:87.18%
---------第 5 轮训练开始---------
训练集整体正确率为:89.27%
测试集整体正确率为:87.71%
---------第 6 轮训练开始---------
训练集整体正确率为:90.78%
测试集整体正确率为:88.51%
['../image\\1.png', '../image\\2.png', '../image\\3.png', '../image\\4.png', '../image\\5.png', '../image\\6.png', '../image\\7.png', '../image\\8.png', '../image\\9.png', '../image\\10.png', '../image\\11.png', '../image\\12.png']
{'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
cat---bird---cat---cat---cat---airplane---airplane---dog---dog---ship---airplane---ship---

错了 2 个,可能是图片本身不够清晰导致的。

不需要网络版

不需要联网下载预训练权重的话,可以在我的资源中下载(纯免费),然后修改一下下面两行。

import os

import torch
import torchvision.datasets
from PIL import Image
from torch.nn import *
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

from torch.utils.tensorboard import SummaryWriter

#记录时间
import time

from torchvision import transforms
from torchvision.transforms import Resize, ToTensor

# Select the training device: CUDA GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# NOTE: the model, the loss module, and every data batch must all be moved to
# `device`; moving only some of them raises a device-mismatch error at runtime.

# Training-set pipeline: light augmentation + CIFAR-10 channel normalization.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # random 32x32 crop after 4px padding
    transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # per-channel CIFAR-10 mean/std
])

# Test-set pipeline: deterministic — no augmentation, same normalization.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])


# Dataset download/load (CIFAR-10, cached under ../data).
# Training split
train = torchvision.datasets.CIFAR10(root="../data",train=True,transform=transform_train,download=True)
# Test split
test = torchvision.datasets.CIFAR10(root="../data",train=False,transform=transform_test,download=True)

# Dataset sizes, used later to turn correct-counts into percentages.
train_len = len(train)
test_len = len(test)
# BUGFIX: the second label previously read "训练集长度为" (training-set length)
# although it prints the test-set length.
print("训练集长度为:{0},\n测试集长度为:{1}".format(train_len,test_len))

# Batch the datasets; shuffling the test loader does not affect the metrics below.
train_dataloader = DataLoader(train,batch_size=100,shuffle=True)
test_dataloader = DataLoader(test,batch_size=100,shuffle=True)




# Instantiate the network (NOTE: unlike the version above, no pretrained
# ImageNet weights are downloaded here — the checkpoint below supplies all
# parameters, so this variant needs no network access).
model = torchvision.models.vgg16()
# Replace the activation at features[1] with Sigmoid, matching the checkpoint.
model.features[1] = Sigmoid()

# Extra Linear layer mapping the 1000 ImageNet logits to CIFAR-10's 10 classes.
model.classifier.add_module("7",Linear(in_features=1000,out_features=10))
# Load the parameters saved by a previous run (the checkpoint file must exist).
model_dict = torch.load("model_accurate_rate88.pth", weights_only=False)
model.load_state_dict(model_dict)


# Move the model to the selected device (GPU when available).
model = model.to(device)



# Loss function: cross-entropy over the 10 class logits.
Loss_Cal = CrossEntropyLoss()
# The loss module is moved to the same device as the model and data.
Loss_Cal = Loss_Cal.to(device)

# Optimizer
# Learning rate
learn_rate = 1e-4

# Adam optimizer; weight_decay adds L2 regularization.
optiom = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=1e-4)

# Learning-rate decay: multiply the lr by 0.1 every 20 scheduler steps.
scheduler = StepLR(optiom, step_size=20, gamma=0.1)

# Number of optimizer steps taken so far
total_train_step = 0
# Number of completed evaluation passes
total_test_step = 0
# Maximum number of training epochs
epoch = 80

# Wall-clock reference used to time the first 100 training steps.
start_time = time.time()

# Main loop: one training pass + one full test-set evaluation per epoch.
# Stops early (saving a checkpoint) once test accuracy exceeds 88%.
for i in range(epoch):
    print("---------第 {} 轮训练开始---------\n".format(i+1),end="")
    train_right = 0
    # Training phase: enables train-mode behavior of Dropout/BatchNorm layers.
    model.train()
    for data in train_dataloader:
        img, target = data
        img = img.to(device)      # batches must be on the same device as the model
        target = target.to(device)
        out_img = model(img)
        # Loss for this batch
        Loss = Loss_Cal(out_img, target)
        # Optimizer step: clear old gradients, backprop, update parameters.
        optiom.zero_grad()
        Loss.backward()
        optiom.step()
        total_train_step = total_train_step + 1

        # Correct predictions in this batch; .item() keeps the counter a plain int.
        train_right = train_right + (out_img.argmax(1) == target).sum().item()
        if total_train_step % 100 == 0:
            end_time = time.time()
            if total_train_step == 100:
                print("训练100次所用时间:{0}".format(end_time-start_time))
            # print("训练次数:{0},Loss值为:{1:.5f}".format(total_train_step,Loss.item()))

    print("训练集整体正确率为:{0:.2f}%".format(train_right * 100 / train_len))

    # Test-set evaluation
    total_loss = 0
    # Total correct predictions over the whole test set
    total_accurate = 0
    model.eval()  # eval-mode behavior for Dropout/BatchNorm layers
    # BUGFIX: evaluation previously ran with autograd enabled despite the comment
    # claiming otherwise; no_grad() skips graph construction, saving time/memory.
    with torch.no_grad():
        for data in test_dataloader:
            img, target = data
            img = img.to(device)
            target = target.to(device)
            out = model(img)
            test_loss = Loss_Cal(out, target)
            total_loss = total_loss + test_loss.item()
            # Per-batch correct count via argmax over the class dimension.
            total_accurate = total_accurate + (out.argmax(1) == target).sum().item()

    # print("测试集整体误差为:{}".format(total_loss))
    print("测试集整体正确率为:{0:.2f}%".format(total_accurate*100/test_len))

    total_test_step = total_test_step + 1

    # BUGFIX: the StepLR scheduler was created but never stepped, so the learning
    # rate never actually decayed; advance it once per epoch.
    scheduler.step()

    # Early stop with checkpoint once test accuracy passes 88%.
    if (total_accurate*100/test_len) > 88:
        torch.save(model.state_dict(), "model_accurate_rate88.pth")
        break


if __name__ == "__main__":
    # Class-name -> index mapping from the CIFAR-10 dataset.
    dictk = test.class_to_idx

    total_img_path = []

    # Collect the 12 sample image paths: ../image/1.png ... ../image/12.png
    root_path = "../image"
    for i in range(12):
        image_path = f"{i + 1}.png"
        total_img_path.append(os.path.join(root_path, image_path))
    print(total_img_path)

    # Preprocessing must mirror the *test* transform used during training:
    # resize to 32x32, to-tensor, then the same CIFAR-10 normalization.
    # (Renamed from `transforms` to stop shadowing the torchvision.transforms module.)
    preprocess = torchvision.transforms.Compose([
        Resize([32, 32]),
        ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    # Rebuild the model and load the saved parameters.
    model = torchvision.models.vgg16()
    # BUGFIX: the checkpoint was trained with Sigmoid at features[1]; omitting it
    # still loads silently (Sigmoid has no parameters) but changes the forward
    # pass and degrades predictions.
    model.features[1] = Sigmoid()
    model.classifier.add_module("7", Linear(in_features=1000, out_features=10))
    model_dict = torch.load("model_accurate_rate88.pth", weights_only=False)
    model.load_state_dict(model_dict)
    model.eval()  # hoisted out of the loop; the mode never changes per image


    def get_key(val):
        """Return the class name mapped to index *val*, or a not-found message."""
        for key, value in dictk.items():
            if val == value:
                return key
        return "There is no such Key"


    print(dictk)
    print("正确概率5cat,2airplane,2dog,3ship")
    with torch.no_grad():
        for i in range(12):
            # BUGFIX: PNGs may carry an alpha channel; convert to 3-channel RGB
            # so the reshape to [1, 3, 32, 32] cannot fail.
            image = Image.open(total_img_path[i]).convert("RGB")
            img_input = preprocess(image)
            img_input = torch.reshape(img_input, [1, 3, 32, 32])
            out = model(img_input)
            print(get_key(out.argmax(1).item()), end="---")

成果展现:

训练100次所用时间:13.408914566040039
训练集整体正确率为:91.14%
测试集整体正确率为:88.02%
['../image\\1.png', '../image\\2.png', '../image\\3.png', '../image\\4.png', '../image\\5.png', '../image\\6.png', '../image\\7.png', '../image\\8.png', '../image\\9.png', '../image\\10.png', '../image\\11.png', '../image\\12.png']
{'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
正确概率5cat,2airplane,2dog,3ship
cat---bird---horse---cat---dog---airplane---airplane---dog---dog---ship---bird---ship---

资源链接(不要积分纯免费直接下就行):

【免费】小土堆学习笔记相关资源资源-CSDN文库


http://www.kler.cn/a/513703.html

相关文章:

  • Github 2025-01-20 开源项目周报 Top15
  • TCP断开通信前的四次挥手(为啥不是三次?)
  • 音频入门(一):音频基础知识与分类的基本流程
  • 【JavaSE】(8) String 类
  • 《从入门到精通:蓝桥杯编程大赛知识点全攻略》(五)-数的三次方根、机器人跳跃问题、四平方和
  • windows 远程链接 Ubuntu 24.04 LTS 图形界面
  • 【论文复现】基于改进鲸鱼优化算法的太阳能光伏模型参数提取问题
  • 嵌入式Linux驱动开发之从设备树到点亮LED
  • 使用 Python 获取淘宝商品描述的 API 接口实现与应用
  • C# 委托和事件(事件)
  • 如何用vscode断点调试Vue.js的项目
  • 在亚马逊云科技上用AI提示词优化功能写出漂亮提示词(上)
  • 解决windows系统远程桌面(或其他全屏应用)全屏后菜单栏不自动消失问题
  • Python多态的概念
  • R语言基础| 回归分析
  • ubuntu下安装gvim
  • 面试-字符串1
  • 解决使用code命令时的bash: code: command not found问题
  • 【JavaScript】for ... 循环语句的使用方法和示例,示例 for 嵌套---九九乘法表
  • SpringBoot项目中替换指定版本的tomcat
  • 7、数组知识点汇总
  • 学习ASP.NET Core的身份认证(基于JwtBearer的身份认证8)
  • 2024国游销量前20游戏分析:某开放世界武侠(排名11)
  • python基础语句整理
  • c++R赋值原始字符串
  • 【Leetcode 热题 100】118. 杨辉三角