
J2 Learning Check-in

  • 🍨 This post is a learning-record blog entry for the 🔗 365-Day Deep Learning Training Camp
  • 🍖 Original author: K同学啊

ResNet50V2

import torch
import torch.nn as nn
import torch.nn.functional as F

# Note: this block keeps the classic post-activation ordering (conv -> BN -> ReLU)
class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out

class ResNet50V2(nn.Module):
    def __init__(self, block, layers, num_classes=1000, norm_layer=None):
        super(ResNet50V2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, norm_layer=norm_layer)  # note: canonical ResNet50 uses stride=2 here

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        downsample = None

        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

def resnet50v2(num_classes=1000):
    return ResNet50V2(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)

# Example usage
model = resnet50v2()
print(model)
ResNet50V2(
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace=True)
  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (layer1): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (layer2): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (3): Bottleneck(
      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (layer3): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (3): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (4): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (5): Bottleneck(
      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (layer4): Sequential(
    (0): Bottleneck(
      (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (downsample): Sequential(
        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): Bottleneck(
      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (2): Bottleneck(
      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
  (fc): Linear(in_features=2048, out_features=1000, bias=True)
)
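A quick shape check confirms the forward pass. The dummy input size below is the usual 224×224 but is only illustrative, since the adaptive average pooling accepts any spatial size:

x = torch.randn(1, 3, 224, 224)  # dummy batch: one RGB image
print(model(x).shape)            # torch.Size([1, 1000])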
# Data preprocessing and loading
import copy  # needed below to snapshot the best model weights
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import numpy as np

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_dir = r"C:\Users\11054\Desktop\kLearning\J1_learning\bird_photos"

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])
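If the commented-out pretrained models.resnet50 below were used instead, inputs would normally be normalized with the ImageNet channel statistics. An optional variant of the transform above (not needed when training from scratch, as here; if added, the visualization cell at the end would need de-normalizing before imshow):

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    # ImageNet mean/std; use only when starting from ImageNet-pretrained weights
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])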

train_dataset = datasets.ImageFolder(data_dir, transform=transform)
train_size = int(0.8 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)

class_names = train_dataset.dataset.classes
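One caveat with the split above: random_split draws from PyTorch's global RNG, so the train/val partition changes between runs. Passing a seeded generator (an optional replacement for the random_split call above, not an addition after it; the seed value is arbitrary) makes it reproducible:

g = torch.Generator().manual_seed(42)
train_dataset, val_dataset = torch.utils.data.random_split(
    train_dataset, [train_size, val_size], generator=g)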
# Example of initialization
model = ResNet50V2(Bottleneck, [3, 4, 6, 3], num_classes=4)
# model = models.resnet50(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, len(class_names))  # replaces fc, so num_classes above is effectively overridden
model = model.to(device)
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
epochs = 100
train_losses, val_losses = [], []
train_acc, val_acc = [], []

best_val_loss = float('inf')
best_model_wts = None  # will hold the weights of the best model seen so far
for epoch in range(epochs):
    # Training
    model.train()
    running_loss, running_corrects = 0.0, 0

    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        running_corrects += torch.sum(preds == labels.data)

    epoch_loss = running_loss / train_size
    epoch_acc = running_corrects.double() / train_size

    train_losses.append(epoch_loss)
    train_acc.append(epoch_acc.item())

    # Validation
    model.eval()
    val_running_loss, val_running_corrects = 0.0, 0

    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            val_running_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            val_running_corrects += torch.sum(preds == labels.data)

    val_epoch_loss = val_running_loss / val_size
    val_epoch_acc = val_running_corrects.double() / val_size

    val_losses.append(val_epoch_loss)
    val_acc.append(val_epoch_acc.item())
    if val_epoch_loss < best_val_loss:
        best_val_loss = val_epoch_loss
        best_model_wts = copy.deepcopy(model.state_dict())  # deep-copy: state_dict() alone returns live references that later updates would overwrite
    print(
        f'Epoch {epoch}/{epochs - 1}, Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.4f}, Val Loss: {val_epoch_loss:.4f}, Val Acc: {val_epoch_acc:.4f}')

# After training finishes, load the best model weights
model.load_state_dict(best_model_wts)
Epoch 0/99, Train Loss: 1.5081, Train Acc: 0.4049, Val Loss: 1.1780, Val Acc: 0.4867
Epoch 1/99, Train Loss: 1.1824, Train Acc: 0.5243, Val Loss: 1.1830, Val Acc: 0.5310
Epoch 2/99, Train Loss: 1.0909, Train Acc: 0.5708, Val Loss: 1.6719, Val Acc: 0.4425
Epoch 3/99, Train Loss: 1.0141, Train Acc: 0.6217, Val Loss: 1.3627, Val Acc: 0.4867
Epoch 4/99, Train Loss: 0.8892, Train Acc: 0.6527, Val Loss: 1.1815, Val Acc: 0.6372
Epoch 5/99, Train Loss: 0.8106, Train Acc: 0.6748, Val Loss: 0.9670, Val Acc: 0.6195
Epoch 6/99, Train Loss: 0.7749, Train Acc: 0.7168, Val Loss: 0.8506, Val Acc: 0.6283
Epoch 7/99, Train Loss: 0.7296, Train Acc: 0.6903, Val Loss: 1.3069, Val Acc: 0.4513
Epoch 8/99, Train Loss: 0.6890, Train Acc: 0.7389, Val Loss: 1.6562, Val Acc: 0.6195
Epoch 9/99, Train Loss: 0.6949, Train Acc: 0.7566, Val Loss: 0.7034, Val Acc: 0.7168
Epoch 10/99, Train Loss: 0.6817, Train Acc: 0.7500, Val Loss: 0.8593, Val Acc: 0.7257
Epoch 11/99, Train Loss: 0.6485, Train Acc: 0.7677, Val Loss: 0.8179, Val Acc: 0.6903
Epoch 12/99, Train Loss: 0.5581, Train Acc: 0.8097, Val Loss: 0.6367, Val Acc: 0.7434
Epoch 13/99, Train Loss: 0.4954, Train Acc: 0.8097, Val Loss: 0.6925, Val Acc: 0.7257
Epoch 14/99, Train Loss: 0.5071, Train Acc: 0.8164, Val Loss: 0.6167, Val Acc: 0.7699
Epoch 15/99, Train Loss: 0.5643, Train Acc: 0.8142, Val Loss: 0.5488, Val Acc: 0.8142
Epoch 16/99, Train Loss: 0.5602, Train Acc: 0.7942, Val Loss: 0.6716, Val Acc: 0.7611
Epoch 17/99, Train Loss: 0.4799, Train Acc: 0.8296, Val Loss: 0.5589, Val Acc: 0.7788
Epoch 18/99, Train Loss: 0.5476, Train Acc: 0.8164, Val Loss: 0.5755, Val Acc: 0.7434
Epoch 19/99, Train Loss: 0.4808, Train Acc: 0.8496, Val Loss: 0.5573, Val Acc: 0.7876
Epoch 20/99, Train Loss: 0.4338, Train Acc: 0.8296, Val Loss: 0.7486, Val Acc: 0.7788
Epoch 21/99, Train Loss: 0.4934, Train Acc: 0.7965, Val Loss: 0.5907, Val Acc: 0.7788
Epoch 22/99, Train Loss: 0.5047, Train Acc: 0.8164, Val Loss: 2.3444, Val Acc: 0.5929
Epoch 23/99, Train Loss: 0.4283, Train Acc: 0.8363, Val Loss: 0.5185, Val Acc: 0.8142
Epoch 24/99, Train Loss: 0.4079, Train Acc: 0.8518, Val Loss: 0.4883, Val Acc: 0.8407
Epoch 25/99, Train Loss: 0.4335, Train Acc: 0.8518, Val Loss: 0.8051, Val Acc: 0.7876
Epoch 26/99, Train Loss: 0.3173, Train Acc: 0.8827, Val Loss: 0.5862, Val Acc: 0.8142
Epoch 27/99, Train Loss: 0.3724, Train Acc: 0.8739, Val Loss: 0.7041, Val Acc: 0.7788
Epoch 28/99, Train Loss: 0.3117, Train Acc: 0.8850, Val Loss: 0.5650, Val Acc: 0.8053
Epoch 29/99, Train Loss: 0.3593, Train Acc: 0.8894, Val Loss: 0.5180, Val Acc: 0.8142
Epoch 30/99, Train Loss: 0.3348, Train Acc: 0.8850, Val Loss: 0.5450, Val Acc: 0.7788
Epoch 31/99, Train Loss: 0.2837, Train Acc: 0.9049, Val Loss: 0.9638, Val Acc: 0.8053
Epoch 32/99, Train Loss: 0.3975, Train Acc: 0.8584, Val Loss: 0.6834, Val Acc: 0.7876
Epoch 33/99, Train Loss: 0.4799, Train Acc: 0.8363, Val Loss: 1.0342, Val Acc: 0.6903
Epoch 34/99, Train Loss: 0.3318, Train Acc: 0.8827, Val Loss: 0.4223, Val Acc: 0.8673
Epoch 35/99, Train Loss: 0.2945, Train Acc: 0.8960, Val Loss: 0.6039, Val Acc: 0.7876
Epoch 36/99, Train Loss: 0.2797, Train Acc: 0.9004, Val Loss: 0.3947, Val Acc: 0.8407
Epoch 37/99, Train Loss: 0.3114, Train Acc: 0.8761, Val Loss: 0.3926, Val Acc: 0.8230
Epoch 38/99, Train Loss: 0.2819, Train Acc: 0.9049, Val Loss: 0.4662, Val Acc: 0.7876
Epoch 39/99, Train Loss: 0.2684, Train Acc: 0.9093, Val Loss: 0.6103, Val Acc: 0.8319
Epoch 40/99, Train Loss: 0.2565, Train Acc: 0.9159, Val Loss: 0.4893, Val Acc: 0.8230
Epoch 41/99, Train Loss: 0.2801, Train Acc: 0.9093, Val Loss: 0.3942, Val Acc: 0.8584
Epoch 42/99, Train Loss: 0.2272, Train Acc: 0.9137, Val Loss: 0.4284, Val Acc: 0.8496
Epoch 43/99, Train Loss: 0.2414, Train Acc: 0.9071, Val Loss: 0.4650, Val Acc: 0.8673
Epoch 44/99, Train Loss: 0.3233, Train Acc: 0.8960, Val Loss: 0.4346, Val Acc: 0.8053
Epoch 45/99, Train Loss: 0.3700, Train Acc: 0.8717, Val Loss: 0.5083, Val Acc: 0.8230
Epoch 46/99, Train Loss: 0.2476, Train Acc: 0.9071, Val Loss: 0.4358, Val Acc: 0.8938
Epoch 47/99, Train Loss: 0.2164, Train Acc: 0.9159, Val Loss: 0.5519, Val Acc: 0.8142
Epoch 48/99, Train Loss: 0.2091, Train Acc: 0.9159, Val Loss: 0.3499, Val Acc: 0.9027
Epoch 49/99, Train Loss: 0.2034, Train Acc: 0.9314, Val Loss: 0.3615, Val Acc: 0.8584
Epoch 50/99, Train Loss: 0.2402, Train Acc: 0.9336, Val Loss: 0.6434, Val Acc: 0.8053
Epoch 51/99, Train Loss: 0.1273, Train Acc: 0.9558, Val Loss: 0.2781, Val Acc: 0.8938
Epoch 52/99, Train Loss: 0.1549, Train Acc: 0.9580, Val Loss: 0.3524, Val Acc: 0.9115
Epoch 53/99, Train Loss: 0.2179, Train Acc: 0.9381, Val Loss: 0.3519, Val Acc: 0.8850
Epoch 54/99, Train Loss: 0.1922, Train Acc: 0.9381, Val Loss: 0.3164, Val Acc: 0.8673
Epoch 55/99, Train Loss: 0.1393, Train Acc: 0.9513, Val Loss: 0.6863, Val Acc: 0.8053
Epoch 56/99, Train Loss: 0.1997, Train Acc: 0.9469, Val Loss: 0.3548, Val Acc: 0.8584
Epoch 57/99, Train Loss: 0.1363, Train Acc: 0.9491, Val Loss: 0.5302, Val Acc: 0.8938
Epoch 58/99, Train Loss: 0.1810, Train Acc: 0.9513, Val Loss: 0.5357, Val Acc: 0.8584
Epoch 59/99, Train Loss: 0.1574, Train Acc: 0.9469, Val Loss: 0.5047, Val Acc: 0.8584
Epoch 60/99, Train Loss: 0.1661, Train Acc: 0.9403, Val Loss: 0.7250, Val Acc: 0.7345
Epoch 61/99, Train Loss: 0.1722, Train Acc: 0.9646, Val Loss: 0.4548, Val Acc: 0.8584
Epoch 62/99, Train Loss: 0.1271, Train Acc: 0.9558, Val Loss: 0.1897, Val Acc: 0.9292
Epoch 63/99, Train Loss: 0.1047, Train Acc: 0.9602, Val Loss: 0.2636, Val Acc: 0.9204
Epoch 64/99, Train Loss: 0.1882, Train Acc: 0.9403, Val Loss: 0.5540, Val Acc: 0.8584
Epoch 65/99, Train Loss: 0.1944, Train Acc: 0.9292, Val Loss: 0.3756, Val Acc: 0.8938
Epoch 66/99, Train Loss: 0.0680, Train Acc: 0.9779, Val Loss: 0.2917, Val Acc: 0.9027
Epoch 67/99, Train Loss: 0.0954, Train Acc: 0.9646, Val Loss: 0.4208, Val Acc: 0.8761
Epoch 68/99, Train Loss: 0.1136, Train Acc: 0.9469, Val Loss: 0.7063, Val Acc: 0.8407
Epoch 69/99, Train Loss: 0.0964, Train Acc: 0.9690, Val Loss: 0.3430, Val Acc: 0.8938
Epoch 70/99, Train Loss: 0.0654, Train Acc: 0.9779, Val Loss: 0.3915, Val Acc: 0.8938
Epoch 71/99, Train Loss: 0.1592, Train Acc: 0.9469, Val Loss: 0.4142, Val Acc: 0.8673
Epoch 72/99, Train Loss: 0.1646, Train Acc: 0.9447, Val Loss: 0.5199, Val Acc: 0.8407
Epoch 73/99, Train Loss: 0.2407, Train Acc: 0.9248, Val Loss: 0.5705, Val Acc: 0.8938
Epoch 74/99, Train Loss: 0.1716, Train Acc: 0.9425, Val Loss: 0.3962, Val Acc: 0.8938
Epoch 75/99, Train Loss: 0.0702, Train Acc: 0.9801, Val Loss: 0.3274, Val Acc: 0.8850
Epoch 76/99, Train Loss: 0.0804, Train Acc: 0.9757, Val Loss: 0.4674, Val Acc: 0.8761
Epoch 77/99, Train Loss: 0.0943, Train Acc: 0.9757, Val Loss: 0.4017, Val Acc: 0.8938
Epoch 78/99, Train Loss: 0.1096, Train Acc: 0.9735, Val Loss: 0.5062, Val Acc: 0.8938
Epoch 79/99, Train Loss: 0.0911, Train Acc: 0.9735, Val Loss: 0.6298, Val Acc: 0.8584
Epoch 80/99, Train Loss: 0.1131, Train Acc: 0.9558, Val Loss: 0.4371, Val Acc: 0.8584
Epoch 81/99, Train Loss: 0.1084, Train Acc: 0.9690, Val Loss: 0.2732, Val Acc: 0.9115
Epoch 82/99, Train Loss: 0.0616, Train Acc: 0.9779, Val Loss: 0.3224, Val Acc: 0.9027
Epoch 83/99, Train Loss: 0.1246, Train Acc: 0.9602, Val Loss: 0.3234, Val Acc: 0.8938
Epoch 84/99, Train Loss: 0.1272, Train Acc: 0.9580, Val Loss: 0.3273, Val Acc: 0.8673
Epoch 85/99, Train Loss: 0.0450, Train Acc: 0.9867, Val Loss: 0.1960, Val Acc: 0.9204
Epoch 86/99, Train Loss: 0.0637, Train Acc: 0.9779, Val Loss: 0.2931, Val Acc: 0.9027
Epoch 87/99, Train Loss: 0.1449, Train Acc: 0.9535, Val Loss: 1.2494, Val Acc: 0.7965
Epoch 88/99, Train Loss: 0.4223, Train Acc: 0.8650, Val Loss: 0.6742, Val Acc: 0.8230
Epoch 89/99, Train Loss: 0.1847, Train Acc: 0.9469, Val Loss: 0.2900, Val Acc: 0.9027
Epoch 90/99, Train Loss: 0.1394, Train Acc: 0.9513, Val Loss: 0.3145, Val Acc: 0.8850
Epoch 91/99, Train Loss: 0.1057, Train Acc: 0.9624, Val Loss: 0.3221, Val Acc: 0.8850
Epoch 92/99, Train Loss: 0.1004, Train Acc: 0.9757, Val Loss: 0.3705, Val Acc: 0.8850
Epoch 93/99, Train Loss: 0.0783, Train Acc: 0.9712, Val Loss: 0.3333, Val Acc: 0.9027
Epoch 94/99, Train Loss: 0.0424, Train Acc: 0.9867, Val Loss: 0.3462, Val Acc: 0.9027
Epoch 95/99, Train Loss: 0.0702, Train Acc: 0.9779, Val Loss: 0.2341, Val Acc: 0.9204
Epoch 96/99, Train Loss: 0.0424, Train Acc: 0.9867, Val Loss: 0.2882, Val Acc: 0.9204
Epoch 97/99, Train Loss: 0.0416, Train Acc: 0.9912, Val Loss: 0.2408, Val Acc: 0.9381
Epoch 98/99, Train Loss: 0.0397, Train Acc: 0.9823, Val Loss: 0.2606, Val Acc: 0.9027
Epoch 99/99, Train Loss: 0.1306, Train Acc: 0.9602, Val Loss: 0.4435, Val Acc: 0.8673

<All keys matched successfully>
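To keep the selected weights beyond this session, they can be written to disk; a minimal sketch (the filename is illustrative):

torch.save(best_model_wts, 'best_resnet50v2.pth')
# restore later with: model.load_state_dict(torch.load('best_resnet50v2.pth'))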
# Run predictions with the trained model
model.eval()
plt.figure(figsize=(10, 5))
plt.suptitle("bird")

for inputs, labels in val_loader:
    inputs, labels = inputs.to(device), labels.to(device)
    outputs = model(inputs)
    _, preds = torch.max(outputs, 1)

    for i in range(len(inputs)):
        ax = plt.subplot(2, 4, i + 1)

        img = inputs[i].cpu().numpy().transpose((1, 2, 0))
        plt.imshow(img)
        plt.title(class_names[preds[i]])

        plt.axis("off")
    break  # visualize only the first validation batch

[Figure: a batch of validation images shown in a 2×4 grid, each titled with its predicted class]

Personal Summary

Differences between V2 and the original V1:

  1. Pre-activation residual units:
    ResNet (V1): in each residual block, the convolutions come before batch normalization and ReLU.
    ResNetV2: batch normalization and the ReLU activation come before the convolution. This "pre-activation" ordering helps gradients flow and lets information propagate more easily, which makes deeper networks easier to train.

  2. Batch normalization position:
    In ResNetV2, every convolution is preceded by a batch-normalization layer, which stabilizes behavior and helps train deep models.

  3. ReLU after the addition:
    ResNet V1 applies a ReLU to the block output after the shortcut addition. ResNetV2 removes this post-addition ReLU, keeping the identity path free of nonlinearities so signals and gradients pass through the shortcut unchanged.

  4. Final activation before pooling:
    Because V2 blocks no longer end with an activation, ResNetV2 adds one last batch normalization and ReLU after the final residual stage, directly before the global average pooling, so the network head still sees an activated feature map.

These modifications give ResNetV2 better gradient propagation and stability, especially as the network becomes very deep; in this experiment the validation loss improved noticeably. Note that the implementation above keeps the classic post-activation ordering inside its blocks; see the sketch below for the pre-activation form.
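A minimal pre-activation (V2-style) bottleneck sketch, for illustration only: the class name PreActBottleneck is made up here, and this is not a drop-in replacement for the Bottleneck class above.

import torch.nn as nn

class PreActBottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # BN -> ReLU comes before each convolution ("pre-activation")
        out = self.relu(self.bn1(x))
        # when downsampling, the shortcut branches off the pre-activated tensor
        identity = self.downsample(out) if self.downsample is not None else x
        out = self.conv1(out)
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        # no ReLU after the addition: the identity path stays clean
        return out + identity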

