
T11 TensorFlow Hands-On: Optimizer Comparison Experiment

  • 🍨 This post is a learning log for the 🔗 365-Day Deep Learning Training Camp
  • 🍖 Original author: K同学啊 | tutoring and custom projects available

I. Preliminary Preparation

1. Import the Data

# Import the required libraries
import numpy as np
import PIL, pathlib
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from datetime import datetime
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.models import Model

import random

# Load the data
data_dir = './data/48-data/'
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))
# Use each class directory's name as the class name (portable across
# Windows and Linux, unlike splitting the path string on "\\")
classeNames = [path.name for path in data_paths]

image_count = len(list(data_dir.glob('*/*')))
print("Total number of images:", image_count)

II. Data Preprocessing

1. Load the Data

# Data loading and preprocessing
batch_size = 16
img_height = 336
img_width = 336

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)

class_names = train_ds.class_names
print(class_names)
[..., 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']  (output truncated)

2. Inspect the Data

# Check the shape of the data
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
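
With batch_size = 16 and image_size = (336, 336), the first batch should print shapes along these lines (assuming the training split contains at least 16 images):

(16, 336, 336, 3)
(16,)

The first shape is (batch, height, width, channels); the second is the batch of integer labels.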

3. Configure the Dataset

AUTOTUNE = tf.data.AUTOTUNE

def train_preprocessing(image,label):
    return (image/255.0,label)

train_ds = (
    train_ds.cache()
    .shuffle(1000)
    .map(train_preprocessing)    # Apply the preprocessing function here
#     .batch(batch_size)           # batch_size was already set in image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)
)

val_ds = (
    val_ds.cache()
    .shuffle(1000)
    .map(train_preprocessing)    # Apply the preprocessing function here
#     .batch(batch_size)         # batch_size was already set in image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)
)
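
As an aside, the 1/255 scaling done in train_preprocessing can alternatively live inside the model as a Rescaling layer, so it travels with any saved model. A minimal sketch of that variant (not used in this post; the pipeline version above works just as well):

# Hypothetical alternative: normalize inside the model instead of in map()
rescale = tf.keras.layers.Rescaling(1./255)
inputs = tf.keras.Input(shape=(img_height, img_width, 3))
x = rescale(inputs)  # the VGG16 backbone would then be applied to x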

4. Visualize the Data

plt.rcParams['font.family'] = 'SimHei'  # Set the font to SimHei (supports Chinese characters)
plt.rcParams['axes.unicode_minus'] = False  # Display minus signs correctly

plt.figure(figsize=(10, 8))  # Figure width 10, height 8
plt.suptitle("Data Preview")

for images, labels in train_ds.take(1):
    for i in range(15):
        plt.subplot(4, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)

        # Display the image
        plt.imshow(images[i])
        # Display the label (labels are 0-indexed class indices)
        plt.xlabel(class_names[labels[i]])

plt.show()

III. Training the Model

1. Build the Model

def create_model(optimizer='adam'):
    # Load the pre-trained VGG16 base (ImageNet weights, without the top classifier)
    vgg16_base_model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                                         include_top=False,
                                                         input_shape=(img_width, img_height, 3),
                                                         pooling='avg')
    # Freeze the convolutional base so only the new head is trained
    for layer in vgg16_base_model.layers:
        layer.trainable = False

    X = vgg16_base_model.output

    X = Dense(170, activation='relu')(X)
    X = BatchNormalization()(X)
    X = Dropout(0.5)(X)

    output = Dense(len(class_names), activation='softmax')(X)
    vgg16_model = Model(inputs=vgg16_base_model.input, outputs=output)

    vgg16_model.compile(optimizer=optimizer,
                        loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])
    return vgg16_model

model1 = create_model(optimizer=tf.keras.optimizers.Adam())
model2 = create_model(optimizer=tf.keras.optimizers.SGD())
model2.summary()
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58889256/58889256 [==============================] - 5s 0us/step
Model: "model_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_2 (InputLayer)        [(None, 336, 336, 3)]     0         
                                                                 
 block1_conv1 (Conv2D)       (None, 336, 336, 64)      1792      
                                                                 
 block1_conv2 (Conv2D)       (None, 336, 336, 64)      36928     
                                                                 
 block1_pool (MaxPooling2D)  (None, 168, 168, 64)      0         
                                                                 
 block2_conv1 (Conv2D)       (None, 168, 168, 128)     73856     
                                                                 
 block2_conv2 (Conv2D)       (None, 168, 168, 128)     147584    
                                                                 
 block2_pool (MaxPooling2D)  (None, 84, 84, 128)       0         
                                                                 
 block3_conv1 (Conv2D)       (None, 84, 84, 256)       295168    
                                                                 
 block3_conv2 (Conv2D)       (None, 84, 84, 256)       590080    
                                                                 
 block3_conv3 (Conv2D)       (None, 84, 84, 256)       590080    
                                                                 
 block3_pool (MaxPooling2D)  (None, 42, 42, 256)       0         
                                                                 
 block4_conv1 (Conv2D)       (None, 42, 42, 512)       1180160   
                                                                 
 block4_conv2 (Conv2D)       (None, 42, 42, 512)       2359808   
                                                                 
 block4_conv3 (Conv2D)       (None, 42, 42, 512)       2359808   
                                                                 
 block4_pool (MaxPooling2D)  (None, 21, 21, 512)       0         
                                                                 
 block5_conv1 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_conv2 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_conv3 (Conv2D)       (None, 21, 21, 512)       2359808   
                                                                 
 block5_pool (MaxPooling2D)  (None, 10, 10, 512)       0         
                                                                 
 global_average_pooling2d_1  (None, 512)               0         
  (GlobalAveragePooling2D)                                       
                                                                 
 dense_2 (Dense)             (None, 170)               87210     
                                                                 
 batch_normalization_1 (Bat  (None, 170)               680       
 chNormalization)                                                
                                                                 
 dropout_1 (Dropout)         (None, 170)               0         
                                                                 
 dense_3 (Dense)             (None, 17)                2907      
                                                                 
=================================================================
Total params: 14805485 (56.48 MB)
Trainable params: 90457 (353.35 KB)
Non-trainable params: 14715028 (56.13 MB)
_________________________________________________________________
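
As a quick sanity check (not in the original post), the trainable/frozen split in the summary can be recomputed directly from the model's weights:

# Confirm that only the new classification head is trainable
trainable_params = int(np.sum([np.prod(w.shape.as_list()) for w in model2.trainable_weights]))
frozen_params = int(np.sum([np.prod(w.shape.as_list()) for w in model2.non_trainable_weights]))
print("Trainable:", trainable_params, "Non-trainable:", frozen_params)

The counts should match the 90,457 trainable and 14,715,028 non-trainable parameters reported above.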

2. Train the Model

# Train the model
NO_EPOCHS = 50

history_model1  = model1.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
history_model2  = model2.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
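
The same pattern extends to any additional optimizer you want to include in the comparison. For example, a hypothetical third run with RMSprop under identical settings (not part of the original experiment):

# Hypothetical third model: RMSprop with the same training setup
model3 = create_model(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3))
history_model3 = model3.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)

Its history can then be added to the accuracy and loss plots below alongside the Adam and SGD curves.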

IV. Model Evaluation

1. Loss and Accuracy Curves

plt.rcParams['savefig.dpi'] = 300  # DPI for saved figures
plt.rcParams['figure.dpi']  = 300  # Display resolution

current_time = datetime.now()  # Get the current time

acc1     = history_model1.history['accuracy']
acc2     = history_model2.history['accuracy']
val_acc1 = history_model1.history['val_accuracy']
val_acc2 = history_model2.history['val_accuracy']

loss1     = history_model1.history['loss']
loss2     = history_model2.history['loss']
val_loss1 = history_model1.history['val_loss']
val_loss2 = history_model2.history['val_loss']

epochs_range = range(len(acc1))

plt.figure(figsize=(16, 4))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, acc1, label='Training Accuracy-Adam')
plt.plot(epochs_range, acc2, label='Training Accuracy-SGD')
plt.plot(epochs_range, val_acc1, label='Validation Accuracy-Adam')
plt.plot(epochs_range, val_acc2, label='Validation Accuracy-SGD')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.xlabel(current_time)  # Timestamp required for the training-camp check-in; screenshots without it are not accepted
# Set the tick interval: one tick per epoch on the x-axis
ax = plt.gca()
ax.xaxis.set_major_locator(MultipleLocator(1))

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss1, label='Training Loss-Adam')
plt.plot(epochs_range, loss2, label='Training Loss-SGD')
plt.plot(epochs_range, val_loss1, label='Validation Loss-Adam')
plt.plot(epochs_range, val_loss2, label='Validation Loss-SGD')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
   
# Set the tick interval: one tick per epoch on the x-axis
ax = plt.gca()
ax.xaxis.set_major_locator(MultipleLocator(1))

plt.show()
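
Since savefig.dpi is configured above, the figure can also be written to disk; the call belongs just before plt.show() in the block above (the filename is a hypothetical choice):

plt.savefig('optimizer_comparison.png', bbox_inches='tight')  # save the comparison plot to disk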

2. Evaluate the Model

def test_accuracy_report(model):
    score = model.evaluate(val_ds, verbose=0)
    print('Loss: %.4f, Accuracy: %.4f' % (score[0], score[1]))
    
test_accuracy_report(model2)
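
Since the point of the experiment is to compare the two optimizers, the same report can be run for the Adam model as well (not shown in the original):

test_accuracy_report(model1)   # Adam-trained model, for a side-by-side comparison with SGD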

 

 

 

