
Week P10: License Plate Recognition with PyTorch

I. Importing the Data

from torchvision            import transforms
from torch.utils.data       import DataLoader
from torchvision            import datasets
import torchvision.models   as models
import torch.nn.functional  as F
import torch.nn             as nn
import torch,torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
device(type='cuda')
1. Getting the class names
import os,PIL,random,pathlib
import matplotlib.pyplot as plt
# Enable Chinese characters in matplotlib
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display minus signs correctly

data_dir = 'F:/jupyter lab/DL-100-days/datasets/licence_plate_photos/'
data_dir = pathlib.Path(data_dir)

data_paths  = list(data_dir.glob('*'))
# Parse the class name (the plate string) out of each filename; path.name is
# more robust than splitting the full path on "\\" at a fixed index
classeNames = [path.name.split("_")[1].split(".")[0] for path in data_paths]
print(classeNames)

data_paths_str = [str(path) for path in data_paths]
data_paths_str

2. Data visualization
plt.figure(figsize=(14,5))
plt.suptitle("Sample images", fontsize=15)

for i in range(18):
    plt.subplot(3,6,i+1)
    # plt.xticks([])
    # plt.yticks([])
    # plt.grid(False)
    
    # Display the image
    images = plt.imread(data_paths_str[i])
    plt.imshow(images)

plt.show()

3. Digitizing the labels
import numpy as np

char_enum = ["京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁",\
              "豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","军","使"]

number   = [str(i) for i in range(0, 10)]    # digits 0 through 9
alphabet = [chr(i) for i in range(65, 91)]   # letters A through Z

char_set       = char_enum + number + alphabet
char_set_len   = len(char_set)
label_name_len = len(classeNames[0])

# Convert a label string into a one-hot matrix
def text2vec(text):
    vector = np.zeros([label_name_len, char_set_len])
    for i, c in enumerate(text):
        idx = char_set.index(c)
        vector[i][idx] = 1.0
    return vector

all_labels = [text2vec(i) for i in classeNames]
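For inspecting labels and, later, decoding model outputs, an inverse helper is handy. Below is a minimal sketch (the name vec2text is illustrative, not part of the original code); it takes the argmax of each row, so it works on one-hot label matrices and raw logits alike:

# Illustrative inverse of text2vec: map each of the 7 one-hot rows back to a character
def vec2text(vector):
    indices = np.argmax(vector, axis=1)            # one index per character position
    return ''.join(char_set[i] for i in indices)

vec2text(all_labels[0])   # should reproduce classeNames[0]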
4. Loading the data files
import torch.utils.data as data
from PIL import Image

class MyDataset(data.Dataset):
    def __init__(self, all_labels, data_paths_str, transform):
        self.img_labels = all_labels      # label matrices
        self.img_dir    = data_paths_str  # image file paths
        self.transform  = transform       # transform applied to each image

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, index):
        image = Image.open(self.img_dir[index]).convert('RGB')  # read the image with PIL
        label = self.img_labels[index]                          # matching label matrix
        
        if self.transform:
            image = self.transform(image)
            
        return image, label  # return the image/label pair
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to a uniform size
    transforms.ToTensor(),          # convert a PIL Image / numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(           # standardize each channel, which helps the model converge
        mean=[0.485, 0.456, 0.406], 
        std =[0.229, 0.224, 0.225])  # the standard ImageNet statistics, not values computed from this dataset
])

total_data = MyDataset(all_labels, data_paths_str, train_transforms)
total_data
<__main__.MyDataset at 0x24892591610>
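The mean/std above are the widely used ImageNet statistics. If you would rather normalize with statistics measured on your own images, here is a rough sketch; it builds a temporary dataset with only Resize + ToTensor so that Normalize does not skew the measurement:

# Sketch: estimate per-channel mean/std of this dataset (assumes tensors in [0, 1])
stat_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
])
stat_data   = MyDataset(all_labels, data_paths_str, stat_transforms)
stat_loader = DataLoader(stat_data, batch_size=64, shuffle=False)

mean = torch.zeros(3)
sq   = torch.zeros(3)
n    = 0
for X, _ in stat_loader:
    mean += X.mean(dim=[0, 2, 3]) * X.size(0)         # per-channel mean, weighted by batch size
    sq   += (X ** 2).mean(dim=[0, 2, 3]) * X.size(0)  # per-channel mean of squares
    n    += X.size(0)
mean /= n
std = (sq / n - mean ** 2).sqrt()                     # std via E[X^2] - E[X]^2
print(mean, std)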
5. Splitting the data
train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_size,test_size
(10940, 2735)
train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=16,
                                       shuffle=True)
test_dl  = torch.utils.data.DataLoader(test_dataset,
                                       batch_size=16,
                                       shuffle=True)

print("The number of images in a training set is: ", len(train_loader)*16)
print("The number of images in a test set is: ", len(test_loader)*16)
print("The number of batches per epoch is: ", len(train_loader))
The number of images in a training set is:  10944
The number of images in a test set is:  2736
The number of batches per epoch is:  684
for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
Shape of X [N, C, H, W]:  torch.Size([16, 3, 224, 224])
Shape of y:  torch.Size([16, 7, 69]) torch.float64
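Each label is a 7 × 69 matrix: one row per plate character (label_name_len = 7) and one-hot columns over the 69-character vocabulary (33 prefix characters + 10 digits + 26 letters). That is also why y arrives as float64 rather than as integer class indices.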

II. Building a Custom Model

class Network_bn(nn.Module):
    def __init__(self):
        super(Network_bn, self).__init__()
        """
        nn.Conv2d()函数:
        第一个参数(in_channels)是输入的channel数量
        第二个参数(out_channels)是输出的channel数量
        第三个参数(kernel_size)是卷积核大小
        第四个参数(stride)是步长,默认为1
        第五个参数(padding)是填充大小,默认为0
        """
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(12)
        self.pool = nn.MaxPool2d(2,2)
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn4 = nn.BatchNorm2d(24)
        self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn5 = nn.BatchNorm2d(24)
        self.fc1 = nn.Linear(24*50*50, label_name_len*char_set_len)
        self.reshape = Reshape([label_name_len, char_set_len])  # Reshape is defined just below

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))      # [N, 12, 220, 220]
        x = F.relu(self.bn2(self.conv2(x)))      # [N, 12, 216, 216]
        x = self.pool(x)                         # [N, 12, 108, 108]
        x = F.relu(self.bn4(self.conv4(x)))      # [N, 24, 104, 104]
        x = F.relu(self.bn5(self.conv5(x)))      # [N, 24, 100, 100]
        x = self.pool(x)                         # [N, 24, 50, 50]
        x = x.view(-1, 24*50*50)
        x = self.fc1(x)
        
        # Final reshape to [N, 7, 69]
        x = self.reshape(x)

        return x
    
# Define the Reshape layer
class Reshape(nn.Module):
    def __init__(self, shape):
        super(Reshape, self).__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(x.size(0), *self.shape)

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

model = Network_bn().to(device)
model
Using cuda device
Network_bn(
  (conv1): Conv2d(3, 12, kernel_size=(5, 5), stride=(1, 1))
  (bn1): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv2): Conv2d(12, 12, kernel_size=(5, 5), stride=(1, 1))
  (bn2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv4): Conv2d(12, 24, kernel_size=(5, 5), stride=(1, 1))
  (bn4): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv5): Conv2d(24, 24, kernel_size=(5, 5), stride=(1, 1))
  (bn5): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (fc1): Linear(in_features=60000, out_features=483, bias=True)
  (reshape): Reshape()
)
import torchsummary

''' Display the network structure '''
torchsummary.summary(model, (3, 224, 224))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 12, 220, 220]             912
       BatchNorm2d-2         [-1, 12, 220, 220]              24
            Conv2d-3         [-1, 12, 216, 216]           3,612
       BatchNorm2d-4         [-1, 12, 216, 216]              24
         MaxPool2d-5         [-1, 12, 108, 108]               0
            Conv2d-6         [-1, 24, 104, 104]           7,224
       BatchNorm2d-7         [-1, 24, 104, 104]              48
            Conv2d-8         [-1, 24, 100, 100]          14,424
       BatchNorm2d-9         [-1, 24, 100, 100]              48
        MaxPool2d-10           [-1, 24, 50, 50]               0
           Linear-11                  [-1, 483]      28,980,483
          Reshape-12                [-1, 7, 69]               0
================================================================
Total params: 29,006,799
Trainable params: 29,006,799
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 26.56
Params size (MB): 110.65
Estimated Total Size (MB): 137.79
----------------------------------------------------------------
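The flattened feature size checks out: the final pooling layer leaves 24 feature maps of 50 × 50, and 24 × 50 × 50 = 60,000 matches fc1's in_features. A throwaway forward pass (not in the original) confirms the output shape:

with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224).to(device))
print(out.shape)  # expected: torch.Size([1, 7, 69])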

III. Training the Model

1. Optimizer, loss function, and train/test loops
learn_rate = 1e-4 
optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)
loss_fn = nn.CrossEntropyLoss()
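Note that the targets passed to the loss below are one-hot float matrices rather than integer class indices. nn.CrossEntropyLoss accepts such probability targets (same shape as the input) from PyTorch 1.10 onward; on older versions you would pass class indices instead, e.g. y_flat.argmax(dim=1). Flattening to [N*7, 69] treats each of the 7 character positions as an independent 69-way classification.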
def train(dataloader, model, optimizer, loss_fn):
    size = len(dataloader.dataset)  # dataset size
    num_batches = len(dataloader)   # number of batches
    
    model.train()
    train_loss, correct = 0.0, 0.0  # initialize as floats
    
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        
        # Forward pass
        pred = model(X)
        
        # Flatten pred and y from [N, 7, 69] to [N * 7, 69] so that each
        # character position is scored as its own 69-way classification
        pred_flat = pred.view(-1, 69)
        y_flat = y.view(-1, 69)

        # Compute the loss
        loss = loss_fn(pred_flat, y_flat.float())
        
        # Backpropagation and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        # Accumulate the training loss
        train_loss += loss.item()

        # Element-wise accuracy: threshold the sigmoid of each logit and compare
        # against the one-hot target. Note this counts the many true zeros as
        # correct, so it runs high; see the per-character metric sketched below.
        with torch.no_grad():
            pred_probs = torch.sigmoid(pred_flat)  # torch.sigmoid (F.sigmoid is deprecated)
            batch_correct = ((pred_probs > 0.5) == y_flat.bool()).float().mean().item()
            correct += batch_correct

    # Average loss and accuracy over batches
    train_loss /= num_batches
    train_acc = correct / num_batches

    return train_acc, train_loss
def test(dataloader, model, loss_fn):
    num_batches = len(dataloader)  # number of batches
    
    test_loss, correct = 0.0, 0.0  # initialize as floats
    
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            
            pred = model(X)
            # Flatten pred and y from [N, 7, 69] to [N * 7, 69]
            pred_flat = pred.view(-1, 69)
            y_flat = y.view(-1, 69)

            # Compute the loss
            loss = loss_fn(pred_flat, y_flat.float())
            test_loss += loss.item()
            
            # Element-wise accuracy, same caveat as in train()
            pred_probs = torch.sigmoid(pred_flat)
            batch_correct = ((pred_probs > 0.5) == y_flat.bool()).float().mean().item()
            correct += batch_correct
    
    # Average loss and accuracy over batches
    test_loss /= num_batches
    test_acc = correct / num_batches
    
    return test_acc, test_loss
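One caveat about the accuracy reported above: it compares all 69 thresholded entries per position against the one-hot target, and since most entries are zero, the metric sits close to 100% even for a weak model. A stricter per-character accuracy (a sketch, not part of the original code) compares only the argmax of each position:

def char_accuracy(pred, y):
    # pred, y: [N, 7, 69]; a character counts as correct only if the
    # highest-scoring class matches the one-hot target's class
    pred_idx = pred.argmax(dim=-1)  # [N, 7]
    true_idx = y.argmax(dim=-1)     # [N, 7]
    return (pred_idx == true_idx).float().mean().item()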
2. Running the training
epochs = 20

train_acc, train_loss, test_acc, test_loss = [], [], [], []

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, optimizer, loss_fn)
    
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn) 
    
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss) 
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    # Print progress
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}')
    print(template.format(epoch + 1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))
Epoch: 1, Train_acc:96.9%, Train_loss:2.583, Test_acc:98.3%, Test_loss:1.517
Epoch: 2, Train_acc:98.9%, Train_loss:0.777, Test_acc:98.8%, Test_loss:0.912
Epoch: 3, Train_acc:99.3%, Train_loss:0.257, Test_acc:99.1%, Test_loss:0.729
Epoch: 4, Train_acc:99.5%, Train_loss:0.101, Test_acc:99.0%, Test_loss:0.749
Epoch: 5, Train_acc:99.6%, Train_loss:0.053, Test_acc:99.0%, Test_loss:0.816
Epoch: 6, Train_acc:99.5%, Train_loss:0.063, Test_acc:99.1%, Test_loss:0.800
Epoch: 7, Train_acc:99.5%, Train_loss:0.076, Test_acc:99.0%, Test_loss:0.889
Epoch: 8, Train_acc:99.6%, Train_loss:0.068, Test_acc:99.2%, Test_loss:0.887
Epoch: 9, Train_acc:99.7%, Train_loss:0.038, Test_acc:99.2%, Test_loss:0.838
Epoch:10, Train_acc:99.7%, Train_loss:0.032, Test_acc:99.2%, Test_loss:0.828
Epoch:11, Train_acc:99.7%, Train_loss:0.028, Test_acc:99.2%, Test_loss:0.821
Epoch:12, Train_acc:99.7%, Train_loss:0.027, Test_acc:99.2%, Test_loss:0.913
Epoch:13, Train_acc:99.7%, Train_loss:0.029, Test_acc:99.2%, Test_loss:0.818
Epoch:14, Train_acc:99.7%, Train_loss:0.022, Test_acc:99.2%, Test_loss:0.875
Epoch:15, Train_acc:99.7%, Train_loss:0.018, Test_acc:99.3%, Test_loss:0.848
Epoch:16, Train_acc:99.8%, Train_loss:0.017, Test_acc:99.2%, Test_loss:1.060
Epoch:17, Train_acc:99.7%, Train_loss:0.022, Test_acc:99.2%, Test_loss:0.968
Epoch:18, Train_acc:99.7%, Train_loss:0.015, Test_acc:99.3%, Test_loss:0.847
Epoch:19, Train_acc:99.8%, Train_loss:0.012, Test_acc:99.3%, Test_loss:0.815
Epoch:20, Train_acc:99.8%, Train_loss:0.007, Test_acc:99.3%, Test_loss:0.842

IV. Analyzing the Results

import matplotlib.pyplot as plt
# Suppress warnings
import warnings
warnings.filterwarnings("ignore")               # ignore warning messages

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Test Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Test Loss')
plt.show()
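Finally, it can be worth eyeballing a few decoded predictions. This snippet reuses the illustrative vec2text helper sketched in the label-digitization step (both the helper and this snippet are assumptions layered on the original code):

# Decode a handful of test predictions back to plate strings
model.eval()
X, y = next(iter(test_dl))
with torch.no_grad():
    pred = model(X.to(device)).cpu().numpy()
for i in range(4):
    print("pred:", vec2text(pred[i]), "  true:", vec2text(y[i].numpy()))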

V. Takeaways

1. This week I loaded and recognized a dataset that does not fit a plain single-label classification setup, and built a custom model for it. The hand-built Network_bn model is a convolutional neural network (CNN) that uses batch normalization (BN) and pooling layers, with four convolution + batch-normalization blocks in total.

2. The computed training loss is quite large, especially in the early epochs; this deserves further investigation.

