
Week P9: Implementing the YOLOv5 Backbone Module in PyTorch

  • 🍨 This post is a study log from the 🔗365天深度学习训练营 (365-day deep learning training camp)
  • 🍖 Original author: K同学啊

Goals

  1. Refactor the code structure
  2. Understand the YOLOv5 backbone module

Implementation

(1) Environment

Language: Python 3.10
IDE: PyCharm
Framework: PyTorch

(2) Steps

The code is organized as follows:
Project layout: Utils.py, config.py, dataset.py, model.py, train.py

1. Utils.py
import torch  
import pathlib  
import matplotlib.pyplot as plt  
from torchvision.transforms import transforms  
  
  
# Step 1: set up the GPU
def USE_GPU():
    if torch.cuda.is_available():
        print('CUDA is available, will use GPU')
        device = torch.device("cuda")
    else:
        print('CUDA is not available. Will use CPU')
        device = torch.device("cpu")

    return device


temp_dict = dict()
def recursive_iterate(path):
    """
    Recursively walk all subdirectories under the given path and count the files in each leaf directory
    :param path: root path
    :return: dict mapping each leaf directory name to its file count
    """
    path = pathlib.Path(path)
    for file in path.iterdir():
        if file.is_file():
            temp_key = str(file).split('\\')[-2]  # parent directory name (Windows-style paths)
            if temp_key in temp_dict:
                temp_dict.update({temp_key: temp_dict[temp_key] + 1})
            else:
                temp_dict.update({temp_key: 1})
            # print(file)
        elif file.is_dir():
            recursive_iterate(file)

    return temp_dict
  
  
def data_from_directory(directory, train_dir=None, test_dir=None, show=False):
    """
    For datasets stored as directories of files: import the data by directory, run a quick analysis and return the class names
    :param directory: dataset root directory
    :param train_dir: training set subdirectory, if one is set
    :param test_dir: test set subdirectory, if one is set
    :param show: whether to show the class distribution as a bar chart
    :return: list of class names
    """
    global total_image
    print("Data directory: {}".format(directory))
    data_dir = pathlib.Path(directory)

    # for d in data_dir.glob('**/*'):  # the **/* pattern walks all subdirectories
    #     if d.is_dir():
    #         print(d)
    class_name = []
    total_image = 0
    # temp_sum = 0

    if train_dir is None or test_dir is None:
        data_path = list(data_dir.glob('*'))
        class_name = [str(path).split('\\')[-1] for path in data_path]
        print("Classes: {}, number of classes: {}".format(class_name, len(list(data_dir.glob('*')))))
        total_image = len(list(data_dir.glob('*/*')))
        print("Total number of images: {}".format(total_image))
    else:
        temp_dict.clear()
        train_data_path = directory + '/' + train_dir
        train_data_info = recursive_iterate(train_data_path)
        print("{} directory: {}, {}".format(train_dir, train_data_path, train_data_info))

        temp_dict.clear()
        test_data_path = directory + '/' + test_dir
        print("{} directory: {}, {}".format(test_dir, test_data_path, recursive_iterate(test_data_path)))
        class_name = temp_dict.keys()

    if show:
        # Suppress warnings
        import warnings
        warnings.filterwarnings("ignore")  # ignore warning messages
        plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
        plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
        plt.rcParams['figure.dpi'] = 100  # resolution

        for i in class_name:
            data = len(list(pathlib.Path((directory + '\\' + i + '\\')).glob('*')))
            plt.title('Class distribution')
            plt.grid(ls='--', alpha=0.5)
            plt.bar(i, data)
            plt.text(i, data, str(data), ha='center', va='bottom')
            print("Class {}: {}".format(i, data))
            # temp_sum += data
        plt.show()

    # if temp_sum == total_image:
    #     print("Total image count check passed")
    # else:
    #     print("Total image count mismatch, please check the dataset!")
    return class_name
  
  
def get_transforms_setting(size):
    """
    Get the default transforms settings
    :param size: image size
    :return: dict of transforms.Compose pipelines
    """
    transform_setting = {
        'train': transforms.Compose([  
            transforms.Resize(size),  
            transforms.ToTensor(),  
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  
        ]),  
        'test': transforms.Compose([  
            transforms.Resize(size),  
            transforms.ToTensor(),  
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  
        ])  
    }  
  
    return transform_setting  
  
  
# Training loop
def train(dataloader, device, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # size of the training set
    num_batches = len(dataloader)  # number of batches (ceil(size / batch_size))

    train_loss, train_acc = 0, 0  # initialize training loss and accuracy

    for X, y in dataloader:  # fetch images and their labels
        X, y = X.to(device), y.to(device)

        # Compute the prediction error
        pred = model(X)  # network output
        loss = loss_fn(pred, y)  # loss between the network output and the ground truth

        # Backpropagation
        optimizer.zero_grad()  # reset gradients
        loss.backward()  # backpropagate
        optimizer.step()  # update parameters

        # Track accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches

    return train_acc, train_loss
  
  
def test(dataloader, device, model, loss_fn):
    size = len(dataloader.dataset)  # size of the test set
    num_batches = len(dataloader)  # number of batches (ceil(size / batch_size))
    test_loss, test_acc = 0, 0

    # Disable gradient tracking during evaluation to save memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # Compute the loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches

    return test_acc, test_loss
  
  
from PIL import Image  
  
def predict_one_image(image_path, device, model, transform, classes):
    """
    Predict a single image
    :param image_path: path to the image
    :param device: CPU or GPU
    :param model: CNN model
    :param transform: preprocessing transforms
    :param classes: list of class names
    :return: None
    """
    test_img = Image.open(image_path).convert('RGB')
    plt.imshow(test_img)  # show the image being predicted

    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)

    model.eval()
    output = model(img)

    _, pred = torch.max(output, 1)
    pred_class = classes[pred]
    print(f'Predicted class: {pred_class}')
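These helpers can also be exercised on their own; for example (a sketch, using the weather dataset from section (3) below):

# Sketch: inspect a directory-style dataset with data_from_directory
from Utils import data_from_directory

class_names = data_from_directory('./data/weather_photos', show=False)
print(list(class_names))  # e.g. ['cloudy', 'rain', 'shine', 'sunrise']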
2. config.py
import argparse  
  
def get_options(parser=argparse.ArgumentParser()):  
    parser.add_argument('--workers', type=int, default=0, help='number of parallel workers')  
    parser.add_argument('--batch-size', type=int, default=4, help='input batch size, default=4')  
    parser.add_argument('--size', type=tuple, default=(224, 224), help='input image size (note: type=tuple only behaves correctly for the default value, see below)')  
    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate, default=0.0001')  
    parser.add_argument('--epochs', type=int, default=20, help='number of epochs')  
    parser.add_argument('--seed', type=int, default=112, help='random seed')  
    parser.add_argument('--save-path', type=str, default='./models/', help='path to save checkpoints')  
  
    opt = parser.parse_args()  
  
    if opt:  
        print(f'num_workers:{opt.workers}')  
        print(f'batch_size:{opt.batch_size}')  
        print(f'learn rate:{opt.lr}')  
        print(f'epochs:{opt.epochs}')  
        print(f'random seed:{opt.seed}')  
        print(f'save_path:{opt.save_path}')  
  
    return opt  
  
if __name__ == '__main__':  
    opt = get_options()
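One caveat here: argparse applies `type` to the raw command-line string, so `type=tuple` would split a value like "224,224" into a tuple of single characters. The default tuple is passed through untouched, which is why the script works as-is. A minimal fix, assuming sizes are given on the command line as `224,224` (the `parse_size` helper is hypothetical):

def parse_size(value):
    # Convert a command-line string like "224,224" into a tuple of ints
    return tuple(int(v) for v in value.split(','))

# parser.add_argument('--size', type=parse_size, default=(224, 224), help='input image size, e.g. 224,224')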
3. dataset.py
import os  
  
import torch  
from PIL import Image  
from torch.utils.data import Dataset  
  
class CaptchaDataset(Dataset):
    def __init__(self, data_dir, transform, characters):
        self.file_list = list()  # paths of the individual training samples
        files = os.listdir(data_dir)  # all files in data_dir
        for file in files:
            path = os.path.join(data_dir, file)
            self.file_list.append(path)

        self.transform = transform  # store the transform object on the instance

        # characters holds the symbols that may appear in a captcha, e.g. the digits 0-9
        # chars = '0123456789'
        self.char2int = {}  # map each character to an integer
        for i, char in enumerate(characters):
            self.char2int[char] = i

    def __len__(self):
        """
        Return the number of samples in the dataset
        :return: number of samples
        """
        return len(self.file_list)

    def __getitem__(self, index):
        """
        Given an index, return the corresponding sample and label
        :param index: index
        :return: sample and label
        """
        file_path = self.file_list[index]  # path of the sample
        # Color is not needed to recognize the characters; converting to grayscale improves robustness
        image = Image.open(file_path).convert('L')
        # Apply the transform, turning the image into a tensor
        image = self.transform(image)
        # The label is encoded in the file name
        label_char = os.path.basename(file_path).split('_')[0]

        label = list()
        for char in label_char:
            label.append(self.char2int[char])  # map each character to its integer id
        label = torch.tensor(label, dtype=torch.long)

        return image, label
  
  
import matplotlib.pyplot as plt
def show_image(data, label):
    # Show up to 8 images from the mini-batch together with their labels
    for i in range(min(8, len(data))):
        plt.subplot(2, 4, i + 1)
        plt.imshow(data[i].squeeze())
        plt.title(''.join(str(v) for v in label[i].tolist()))
        plt.axis('off')
    plt.show()
  
from torch.utils.data import DataLoader  
from torchvision import transforms  
  
if __name__ == '__main__':
    transform = transforms.Compose([
        transforms.Resize((128, 128)),  # scale the image to the given size
        transforms.ToTensor(),  # convert the image to a tensor
    ])

    # Create the CaptchaDataset object
    dataset = CaptchaDataset('data/captcha/4digits/', transform, '0123456789')  # characters: digits 0-9
    dataloader = DataLoader(dataset, batch_size=64, shuffle=True)

    # A loop that mimics the data reading of mini-batch gradient descent
    for epoch in range(3):
        print("epoch = %d" % epoch)
        for batch_idx, (data, label) in enumerate(dataloader):
            print("batch_idx = %d, label = %s" % (batch_idx, label))
            show_image(data, label)
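Note that train.py below imports a WeatherDataset from this module, which is not shown in the post (the CaptchaDataset above is carried over from week P8). Judging from the training log in section (3) — an ImageFolder printed first, followed by two Subset objects — a minimal sketch of what it might look like, assuming a simple 80/20 random split:

import torch
from torchvision import datasets

class WeatherDataset:
    # Sketch (assumed implementation): wrap an ImageFolder and expose train/test splits
    def __init__(self, data_dir, transform):
        self.data = datasets.ImageFolder(data_dir, transform=transform)
        print(self.data)
        train_size = int(0.8 * len(self.data))  # assumed 80/20 split
        test_size = len(self.data) - train_size
        self.train_ds, self.test_ds = torch.utils.data.random_split(
            self.data, [train_size, test_size])
        print(self.train_ds, self.test_ds)

    def __getds__(self, which):
        # train.py calls total_data.__getds__('train') / total_data.__getds__('test')
        return self.train_ds if which == 'train' else self.test_ds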
4. model.py (the network module from week P8 is also included here)
import warnings  
  
import torch  
import torch.nn as nn  
  
  
# Build the model
def autopad(k, p=None):  # kernel, padding
    # Pad to 'same'
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p
  
  
class Conv(nn.Module):  
    # Standard convolution  
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups  
        super().__init__()  
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)  
        self.bn = nn.BatchNorm2d(c2)  
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())  
  
    def forward(self, x):  
        return self.act(self.bn(self.conv(x)))  
  
  
class Bottleneck(nn.Module):  
    # Standard bottleneck  
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion  
        super().__init__()  
        c_ = int(c2 * e)  # hidden channels  
        self.cv1 = Conv(c1, c_, 1, 1)  
        self.cv2 = Conv(c_, c2, 3, 1, g=g)  
        self.add = shortcut and c1 == c2  
  
    def forward(self, x):  
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))  
  
  
class C3(nn.Module):  
    # CSP Bottleneck with 3 convolutions  
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion  
        super().__init__()  
        c_ = int(c2 * e)  # hidden channels  
        self.cv1 = Conv(c1, c_, 1, 1)  
        self.cv2 = Conv(c1, c_, 1, 1)  
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)  
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))  
  
    def forward(self, x):  
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))  
  
class SPPF(nn.Module):  
    # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher  
    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))  
        super().__init__()  
        c_ = c1 // 2  # hidden channels  
        self.cv1 = Conv(c1, c_, 1, 1)  
        self.cv2 = Conv(c_ * 4, c2, 1, 1)  
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)  
  
    def forward(self, x):  
        x = self.cv1(x)  
        with warnings.catch_warnings():  
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning  
            y1 = self.m(x)  
            y2 = self.m(y1)  
            return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))  
  
  
class model_K(nn.Module):  
    def __init__(self):  
        super(model_K, self).__init__()  
  
        # Convolution module
        self.Conv = Conv(3, 32, 3, 2)

        # C3 module 1
        self.C3_1 = C3(32, 64, 3, 2)

        # Fully connected layers for classification
        self.classifier = nn.Sequential(  
            nn.Linear(in_features=802816, out_features=100),  
            nn.ReLU(),  
            nn.Linear(in_features=100, out_features=4)  
        )  
  
    def forward(self, x):  
        x = self.Conv(x)  
        x = self.C3_1(x)  
        x = torch.flatten(x, start_dim=1)  
        x = self.classifier(x)  
  
        return x  
  
  
class YOLOv5_backbone(nn.Module):  
    def __init__(self):  
        super(YOLOv5_backbone, self).__init__()  
  
        self.Conv_1 = Conv(3, 64, 3, 2, 2)  
        self.Conv_2 = Conv(64, 128, 3, 2)  
        self.C3_3 = C3(128, 128)  
        self.Conv_4 = Conv(128, 256, 3, 2)  
        self.C3_5 = C3(256, 256)  
        self.Conv_6 = Conv(256, 512, 3, 2)  
        self.C3_7 = C3(512, 512)  
        self.Conv_8 = Conv(512, 1024, 3, 2)  
        self.C3_9 = C3(1024, 1024)  
        self.SPPF = SPPF(1024, 1024, 5)  
  
        # Fully connected layers for classification
        self.classifier = nn.Sequential(  
            nn.Linear(in_features=65536, out_features=100),  
            nn.ReLU(),  
            nn.Linear(in_features=100, out_features=4)  
        )  
  
    def forward(self, x):  
        x = self.Conv_1(x)  
        x = self.Conv_2(x)  
        x = self.C3_3(x)  
        x = self.Conv_4(x)  
        x = self.C3_5(x)  
        x = self.Conv_6(x)  
        x = self.C3_7(x)  
        x = self.Conv_8(x)  
        x = self.C3_9(x)  
        x = self.SPPF(x)  
  
        x = torch.flatten(x, start_dim=1)  
        x = self.classifier(x)  
  
        return x
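As a quick sanity check (a sketch, not part of the original scripts): with a 224×224 input, the backbone downsamples to an 8×8 feature map with 1024 channels, which is exactly where the classifier's in_features=65536 (1024 × 8 × 8) comes from.

# Sketch: verify the backbone's output shape on a dummy input
import torch
from model import YOLOv5_backbone

m = YOLOv5_backbone()
x = torch.randn(1, 3, 224, 224)  # one dummy RGB image
print(m(x).shape)  # expected: torch.Size([1, 4])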
5. train.py
import os  
  
import torch  
from torch import optim, nn  
  
from dataset import WeatherDataset  
from model import YOLOv5_backbone  
from config import get_options  
from Utils import data_from_directory, get_transforms_setting, USE_GPU, train, test  
  
device = USE_GPU()  
opt = get_options()  
  
DATA_DIR = "./data/weather_photos"  
classNames = data_from_directory(DATA_DIR)  
  
transform = get_transforms_setting(opt.size)  
  
  
model_save_path = opt.save_path  
model_name = 'weather-v5.pth'  
model_save_name = model_save_path + '/' + model_name  
# Create the model directory if it doesn't exist
if not os.path.exists(model_save_path):
    os.makedirs(model_save_path)

total_data = WeatherDataset(DATA_DIR, transform['train'])
train_dataset = total_data.__getds__('train')  
test_dataset = total_data.__getds__('test')  
  
train_dl = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True)  
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batch_size, shuffle=True)  
for X, y in test_dl:  
    print("Shape of X[N, C, H, W]:", X.shape)  
    print("Shape of y:", y.shape, y.dtype)  
    break  
  
# Create the model instance
model = YOLOv5_backbone().to(device)  
print(model)  
  
# Print a model summary
import torchsummary as summary  
summary.summary(model, (3, 224, 224))  
  
# Training
import copy  
  
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  
loss_fn = nn.CrossEntropyLoss()  # loss function
  
epochs = 20  
  
train_loss = []  
train_acc = []  
test_loss = []  
test_acc = []  
  
best_acc = 0  # best accuracy so far, used to pick the best model
  
for epoch in range(epochs):  
  
    model.train()  
    epoch_train_acc, epoch_train_loss = train(train_dl, device, model, loss_fn, optimizer)  
  
    model.eval()  
    epoch_test_acc, epoch_test_loss = test(test_dl, device, model, loss_fn)  
  
    # Save the best model to best_model
    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc  
        best_model = copy.deepcopy(model)  
  
    train_acc.append(epoch_train_acc)  
    train_loss.append(epoch_train_loss)  
    test_acc.append(epoch_test_acc)  
    test_loss.append(epoch_test_loss)  
  
    # Get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']  
  
    template = 'Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}'  
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,  
                          epoch_test_acc * 100, epoch_test_loss, lr))  
  
# Save the best model to file
torch.save(best_model.state_dict(), model_save_name)
  
print('Done')  
  
# Visualize the training results
import matplotlib.pyplot as plt
# Suppress warnings
import warnings
warnings.filterwarnings("ignore")               # ignore warning messages
plt.rcParams['font.sans-serif']    = ['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False      # display minus signs correctly
plt.rcParams['figure.dpi']         = 100        # resolution

from datetime import datetime
current_time = datetime.now()  # get the current time
  
epochs_range = range(epochs)  
  
plt.figure(figsize=(12, 3))  
plt.subplot(1, 2, 1)  
  
plt.plot(epochs_range, train_acc, label='Training Accuracy')  
plt.plot(epochs_range, test_acc, label='Test Accuracy')  
plt.legend(loc='lower right')  
plt.title('Training and Validation Accuracy')  
plt.xlabel(current_time)  # include a timestamp when checking in; screenshots without one are not accepted
  
plt.subplot(1, 2, 2)  
plt.plot(epochs_range, train_loss, label='Training Loss')  
plt.plot(epochs_range, test_loss, label='Test Loss')  
plt.legend(loc='upper right')  
plt.title('Training and Validation Loss')  
plt.show()
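After training, the saved checkpoint can be reloaded for inference with the helper from Utils.py (a sketch; the image path is a placeholder):

# Sketch: reload the saved checkpoint and classify a single image
import torch
from model import YOLOv5_backbone
from Utils import USE_GPU, get_transforms_setting, predict_one_image

device = USE_GPU()
model = YOLOv5_backbone().to(device)
model.load_state_dict(torch.load('./models/weather-v5.pth', map_location=device))

transform = get_transforms_setting((224, 224))
classes = ['cloudy', 'rain', 'shine', 'sunrise']
predict_one_image('./data/weather_photos/sunrise/example.jpg', device, model, transform['test'], classes)  # path is a placeholder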
(3) Training Run
E:\dev\AI\Pytorch\.venv\Scripts\python.exe E:\dev\AI\Pytorch\实战\P8&P9-YOLO实现\train.py 
CUDA is available, will use GPU
num_workers:0
batch_size:4
learn rate:0.0001
epochs:20
random seed:112
save_path:./models/
Data directory: ./data/weather_photos
Classes: ['cloudy', 'rain', 'shine', 'sunrise'], number of classes: 4
Total number of images: 1125
Dataset ImageFolder
    Number of datapoints: 1125
    Root location: ./data/weather_photos
    StandardTransform
Transform: Compose(
               Resize(size=(224, 224), interpolation=bilinear, max_size=None, antialias=True)
               ToTensor()
               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
           )
<torch.utils.data.dataset.Subset object at 0x00000189468193A0> <torch.utils.data.dataset.Subset object at 0x0000018946819400>
Shape of X[N, C, H, W]: torch.Size([4, 3, 224, 224])
Shape of y: torch.Size([4]) torch.int64
YOLOv5_backbone(
  (Conv_1): Conv(
    (conv): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), bias=False)
    (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (act): SiLU()
  )
  (Conv_2): Conv(
    (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (act): SiLU()
  )
  (C3_3): C3(
    (cv1): Conv(
      (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv2): Conv(
      (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv3): Conv(
      (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (m): Sequential(
      (0): Bottleneck(
        (cv1): Conv(
          (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
        (cv2): Conv(
          (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
      )
    )
  )
  (Conv_4): Conv(
    (conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (act): SiLU()
  )
  (C3_5): C3(
    (cv1): Conv(
      (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv2): Conv(
      (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv3): Conv(
      (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (m): Sequential(
      (0): Bottleneck(
        (cv1): Conv(
          (conv): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
        (cv2): Conv(
          (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
      )
    )
  )
  (Conv_6): Conv(
    (conv): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (act): SiLU()
  )
  (C3_7): C3(
    (cv1): Conv(
      (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv2): Conv(
      (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv3): Conv(
      (conv): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (m): Sequential(
      (0): Bottleneck(
        (cv1): Conv(
          (conv): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
        (cv2): Conv(
          (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
      )
    )
  )
  (Conv_8): Conv(
    (conv): Conv2d(512, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (act): SiLU()
  )
  (C3_9): C3(
    (cv1): Conv(
      (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv2): Conv(
      (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv3): Conv(
      (conv): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (m): Sequential(
      (0): Bottleneck(
        (cv1): Conv(
          (conv): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
        (cv2): Conv(
          (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): SiLU()
        )
      )
    )
  )
  (SPPF): SPPF(
    (cv1): Conv(
      (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (cv2): Conv(
      (conv): Conv2d(2048, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (act): SiLU()
    )
    (m): MaxPool2d(kernel_size=5, stride=1, padding=2, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=65536, out_features=100, bias=True)
    (1): ReLU()
    (2): Linear(in_features=100, out_features=4, bias=True)
  )
)
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 113, 113]           1,728
       BatchNorm2d-2         [-1, 64, 113, 113]             128
              SiLU-3         [-1, 64, 113, 113]               0
              Conv-4         [-1, 64, 113, 113]               0
            Conv2d-5          [-1, 128, 57, 57]          73,728
       BatchNorm2d-6          [-1, 128, 57, 57]             256
              SiLU-7          [-1, 128, 57, 57]               0
              Conv-8          [-1, 128, 57, 57]               0
            Conv2d-9           [-1, 64, 57, 57]           8,192
      BatchNorm2d-10           [-1, 64, 57, 57]             128
             SiLU-11           [-1, 64, 57, 57]               0
             Conv-12           [-1, 64, 57, 57]               0
           Conv2d-13           [-1, 64, 57, 57]           4,096
      BatchNorm2d-14           [-1, 64, 57, 57]             128
             SiLU-15           [-1, 64, 57, 57]               0
             Conv-16           [-1, 64, 57, 57]               0
           Conv2d-17           [-1, 64, 57, 57]          36,864
      BatchNorm2d-18           [-1, 64, 57, 57]             128
             SiLU-19           [-1, 64, 57, 57]               0
             Conv-20           [-1, 64, 57, 57]               0
       Bottleneck-21           [-1, 64, 57, 57]               0
           Conv2d-22           [-1, 64, 57, 57]           8,192
      BatchNorm2d-23           [-1, 64, 57, 57]             128
             SiLU-24           [-1, 64, 57, 57]               0
             Conv-25           [-1, 64, 57, 57]               0
           Conv2d-26          [-1, 128, 57, 57]          16,384
      BatchNorm2d-27          [-1, 128, 57, 57]             256
             SiLU-28          [-1, 128, 57, 57]               0
             Conv-29          [-1, 128, 57, 57]               0
               C3-30          [-1, 128, 57, 57]               0
           Conv2d-31          [-1, 256, 29, 29]         294,912
      BatchNorm2d-32          [-1, 256, 29, 29]             512
             SiLU-33          [-1, 256, 29, 29]               0
             Conv-34          [-1, 256, 29, 29]               0
           Conv2d-35          [-1, 128, 29, 29]          32,768
      BatchNorm2d-36          [-1, 128, 29, 29]             256
             SiLU-37          [-1, 128, 29, 29]               0
             Conv-38          [-1, 128, 29, 29]               0
           Conv2d-39          [-1, 128, 29, 29]          16,384
      BatchNorm2d-40          [-1, 128, 29, 29]             256
             SiLU-41          [-1, 128, 29, 29]               0
             Conv-42          [-1, 128, 29, 29]               0
           Conv2d-43          [-1, 128, 29, 29]         147,456
      BatchNorm2d-44          [-1, 128, 29, 29]             256
             SiLU-45          [-1, 128, 29, 29]               0
             Conv-46          [-1, 128, 29, 29]               0
       Bottleneck-47          [-1, 128, 29, 29]               0
           Conv2d-48          [-1, 128, 29, 29]          32,768
      BatchNorm2d-49          [-1, 128, 29, 29]             256
             SiLU-50          [-1, 128, 29, 29]               0
             Conv-51          [-1, 128, 29, 29]               0
           Conv2d-52          [-1, 256, 29, 29]          65,536
      BatchNorm2d-53          [-1, 256, 29, 29]             512
             SiLU-54          [-1, 256, 29, 29]               0
             Conv-55          [-1, 256, 29, 29]               0
               C3-56          [-1, 256, 29, 29]               0
           Conv2d-57          [-1, 512, 15, 15]       1,179,648
      BatchNorm2d-58          [-1, 512, 15, 15]           1,024
             SiLU-59          [-1, 512, 15, 15]               0
             Conv-60          [-1, 512, 15, 15]               0
           Conv2d-61          [-1, 256, 15, 15]         131,072
      BatchNorm2d-62          [-1, 256, 15, 15]             512
             SiLU-63          [-1, 256, 15, 15]               0
             Conv-64          [-1, 256, 15, 15]               0
           Conv2d-65          [-1, 256, 15, 15]          65,536
      BatchNorm2d-66          [-1, 256, 15, 15]             512
             SiLU-67          [-1, 256, 15, 15]               0
             Conv-68          [-1, 256, 15, 15]               0
           Conv2d-69          [-1, 256, 15, 15]         589,824
      BatchNorm2d-70          [-1, 256, 15, 15]             512
             SiLU-71          [-1, 256, 15, 15]               0
             Conv-72          [-1, 256, 15, 15]               0
       Bottleneck-73          [-1, 256, 15, 15]               0
           Conv2d-74          [-1, 256, 15, 15]         131,072
      BatchNorm2d-75          [-1, 256, 15, 15]             512
             SiLU-76          [-1, 256, 15, 15]               0
             Conv-77          [-1, 256, 15, 15]               0
           Conv2d-78          [-1, 512, 15, 15]         262,144
      BatchNorm2d-79          [-1, 512, 15, 15]           1,024
             SiLU-80          [-1, 512, 15, 15]               0
             Conv-81          [-1, 512, 15, 15]               0
               C3-82          [-1, 512, 15, 15]               0
           Conv2d-83           [-1, 1024, 8, 8]       4,718,592
      BatchNorm2d-84           [-1, 1024, 8, 8]           2,048
             SiLU-85           [-1, 1024, 8, 8]               0
             Conv-86           [-1, 1024, 8, 8]               0
           Conv2d-87            [-1, 512, 8, 8]         524,288
      BatchNorm2d-88            [-1, 512, 8, 8]           1,024
             SiLU-89            [-1, 512, 8, 8]               0
             Conv-90            [-1, 512, 8, 8]               0
           Conv2d-91            [-1, 512, 8, 8]         262,144
      BatchNorm2d-92            [-1, 512, 8, 8]           1,024
             SiLU-93            [-1, 512, 8, 8]               0
             Conv-94            [-1, 512, 8, 8]               0
           Conv2d-95            [-1, 512, 8, 8]       2,359,296
      BatchNorm2d-96            [-1, 512, 8, 8]           1,024
             SiLU-97            [-1, 512, 8, 8]               0
             Conv-98            [-1, 512, 8, 8]               0
       Bottleneck-99            [-1, 512, 8, 8]               0
          Conv2d-100            [-1, 512, 8, 8]         524,288
     BatchNorm2d-101            [-1, 512, 8, 8]           1,024
            SiLU-102            [-1, 512, 8, 8]               0
            Conv-103            [-1, 512, 8, 8]               0
          Conv2d-104           [-1, 1024, 8, 8]       1,048,576
     BatchNorm2d-105           [-1, 1024, 8, 8]           2,048
            SiLU-106           [-1, 1024, 8, 8]               0
            Conv-107           [-1, 1024, 8, 8]               0
              C3-108           [-1, 1024, 8, 8]               0
          Conv2d-109            [-1, 512, 8, 8]         524,288
     BatchNorm2d-110            [-1, 512, 8, 8]           1,024
            SiLU-111            [-1, 512, 8, 8]               0
            Conv-112            [-1, 512, 8, 8]               0
       MaxPool2d-113            [-1, 512, 8, 8]               0
       MaxPool2d-114            [-1, 512, 8, 8]               0
       MaxPool2d-115            [-1, 512, 8, 8]               0
          Conv2d-116           [-1, 1024, 8, 8]       2,097,152
     BatchNorm2d-117           [-1, 1024, 8, 8]           2,048
            SiLU-118           [-1, 1024, 8, 8]               0
            Conv-119           [-1, 1024, 8, 8]               0
            SPPF-120           [-1, 1024, 8, 8]               0
          Linear-121                  [-1, 100]       6,553,700
            ReLU-122                  [-1, 100]               0
          Linear-123                    [-1, 4]             404
================================================================
Total params: 21,729,592
Trainable params: 21,729,592
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 137.59
Params size (MB): 82.89
Estimated Total Size (MB): 221.06
----------------------------------------------------------------
Epoch: 1, Train_acc:55.0%, Train_loss:1.114, Test_acc:62.7%, Test_loss:1.046, Lr:1.00E-04
Epoch: 2, Train_acc:66.0%, Train_loss:0.803, Test_acc:66.2%, Test_loss:0.785, Lr:1.00E-04
Epoch: 3, Train_acc:69.2%, Train_loss:0.712, Test_acc:63.6%, Test_loss:0.883, Lr:1.00E-04
Epoch: 4, Train_acc:71.7%, Train_loss:0.722, Test_acc:76.0%, Test_loss:0.598, Lr:1.00E-04
Epoch: 5, Train_acc:74.8%, Train_loss:0.657, Test_acc:74.2%, Test_loss:0.610, Lr:1.00E-04
Epoch: 6, Train_acc:80.8%, Train_loss:0.499, Test_acc:78.7%, Test_loss:0.472, Lr:1.00E-04
Epoch: 7, Train_acc:84.3%, Train_loss:0.399, Test_acc:87.1%, Test_loss:0.360, Lr:1.00E-04
Epoch: 8, Train_acc:86.1%, Train_loss:0.343, Test_acc:86.2%, Test_loss:0.340, Lr:1.00E-04
Epoch: 9, Train_acc:88.3%, Train_loss:0.317, Test_acc:82.2%, Test_loss:0.479, Lr:1.00E-04
Epoch:10, Train_acc:88.4%, Train_loss:0.308, Test_acc:84.4%, Test_loss:0.368, Lr:1.00E-04
Epoch:11, Train_acc:90.0%, Train_loss:0.260, Test_acc:83.1%, Test_loss:0.392, Lr:1.00E-04
Epoch:12, Train_acc:89.3%, Train_loss:0.288, Test_acc:84.9%, Test_loss:0.349, Lr:1.00E-04
Epoch:13, Train_acc:89.6%, Train_loss:0.301, Test_acc:85.3%, Test_loss:0.440, Lr:1.00E-04
Epoch:14, Train_acc:91.0%, Train_loss:0.216, Test_acc:89.8%, Test_loss:0.344, Lr:1.00E-04
Epoch:15, Train_acc:93.2%, Train_loss:0.170, Test_acc:92.4%, Test_loss:0.211, Lr:1.00E-04
Epoch:16, Train_acc:94.8%, Train_loss:0.165, Test_acc:86.7%, Test_loss:0.371, Lr:1.00E-04
Epoch:17, Train_acc:92.9%, Train_loss:0.191, Test_acc:92.0%, Test_loss:0.252, Lr:1.00E-04
Epoch:18, Train_acc:96.1%, Train_loss:0.111, Test_acc:85.3%, Test_loss:0.388, Lr:1.00E-04
Epoch:19, Train_acc:94.1%, Train_loss:0.175, Test_acc:83.6%, Test_loss:0.396, Lr:1.00E-04
Epoch:20, Train_acc:95.7%, Train_loss:0.116, Test_acc:90.2%, Test_loss:0.292, Lr:1.00E-04
Done

Process finished with exit code 0

(Figure: training and validation accuracy/loss curves over the 20 epochs)

