T7 TensorFlow Hands-On: Coffee Bean Recognition
- 🍨 This post is a learning-record blog for the 🔗365天深度学习训练营 (365-day deep learning training camp)
- 🍖 Original author: K同学啊 | tutoring and custom projects available
I. Preliminary Preparation
1. Importing the Data
# Import the required libraries
import pathlib
import numpy as np
import matplotlib.pyplot as plt
import PIL
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
# Load the data
data_dir = './data/49-data/'
data_dir = pathlib.Path(data_dir)
data_paths = list(data_dir.glob('*'))
# Use Path.name rather than splitting on "\\" so this works on any OS
classeNames = [path.name for path in data_paths]
classeNames
2. Inspecting the Data
image_count = len(list(data_dir.glob('*/*.png')))
print("Total number of images:", image_count)
II. Data Preprocessing
1. Loading the Data
# Data loading and preprocessing
batch_size = 32
img_height = 224
img_width = 224

"""
For a detailed introduction to image_dataset_from_directory(), see:
https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

class_names = train_ds.class_names
print(class_names)
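Because both calls use validation_split=0.2 with the same seed, the two subsets are complementary: 80% of the images go to training and 20% to validation. A quick sanity check (note that cardinality counts batches, not images):

# Number of batches in each subset
print("Training batches:  ", tf.data.experimental.cardinality(train_ds).numpy())
print("Validation batches:", tf.data.experimental.cardinality(val_ds).numpy())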
2. Visualizing the Data
# Visualize the data
plt.figure(figsize=(10, 4))
for images, labels in train_ds.take(1):
    for i in range(10):
        ax = plt.subplot(2, 5, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        # Labels are integer class indices (the default label_mode='int'),
        # so index class_names directly rather than with np.argmax
        plt.title(class_names[labels[i]])
        plt.axis("off")

# Check the shape of the data
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)   # (32, 224, 224, 3)
    print(labels_batch.shape)  # (32,)
    break
3. Configuring the Dataset
AUTOTUNE = tf.data.AUTOTUNE

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Normalize pixel values from [0, 255] to [0, 1]
# (layers.Rescaling replaces the deprecated layers.experimental.preprocessing.Rescaling)
normalization_layer = layers.Rescaling(1./255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))

image_batch, labels_batch = next(iter(val_ds))
first_image = image_batch[0]
# Inspect the normalized value range
print(np.min(first_image), np.max(first_image))
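An alternative to mapping Rescaling over the datasets is to make it the first layer of the model, so a saved model accepts raw 0-255 images at inference time without a separate preprocessing step. A sketch of that option (this post keeps normalization in the input pipeline instead):

# Option: bake normalization into the model itself
inputs = tf.keras.Input(shape=(img_height, img_width, 3))
scaled = layers.Rescaling(1./255)(inputs)
# ...the convolutional blocks would then be built from `scaled` instead of the raw input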
III. Model Training
1. Building the VGG-16 Network
Architecture overview:
- 13 convolutional layers, named blockX_convX
- 3 fully connected layers, named fcX and predictions
- 5 max-pooling layers, named blockX_pool
VGG-16 has 16 weight layers (13 convolutional and 3 fully connected), hence the name.
# Define the model
def VGG16(nb_classes, input_shape):
    input_tensor = Input(shape=input_shape)
    # 1st block
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # 2nd block
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # 3rd block
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # 4th block
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    # 5th block
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    # Fully connected layers
    x = Flatten()(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    output_tensor = Dense(nb_classes, activation='softmax', name='predictions')(x)

    model = Model(input_tensor, output_tensor)
    return model

model = VGG16(len(class_names), (img_width, img_height, 3))
model.summary()  # Print the model architecture
Model: "model"
_________________________________________________________________
 Layer (type)                 Output Shape              Param #
=================================================================
 input_1 (InputLayer)         [(None, 224, 224, 3)]     0
 block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792
 block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928
 block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0
 block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856
 block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584
 block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0
 block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168
 block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080
 block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080
 block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0
 block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160
 block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808
 block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808
 block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0
 block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808
 block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808
 block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808
 block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0
 flatten (Flatten)            (None, 25088)             0
 fc1 (Dense)                  (None, 4096)              102764544
 fc2 (Dense)                  (None, 4096)              16781312
 predictions (Dense)          (None, 4)                 16388
=================================================================
Total params: 134276932 (512.23 MB)
Trainable params: 134276932 (512.23 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
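The counts in the summary can be checked by hand: a Conv2D layer has (k_h * k_w * c_in + 1) * filters parameters, and a Dense layer has (n_in + 1) * units. Two spot checks against the table above:

# block1_conv1: 3x3 kernels, 3 input channels, 64 filters
print((3 * 3 * 3 + 1) * 64)       # 1792
# fc1: Flatten yields 7*7*512 = 25088 inputs feeding 4096 units
print((7 * 7 * 512 + 1) * 4096)   # 102764544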
2. Compiling the Model
# Set the initial learning rate
initial_learning_rate = 1e-4

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=30,   # Note: this counts steps (batches), not epochs
    decay_rate=0.92,  # each decay multiplies the LR by decay_rate
    staircase=True)

# Set the optimizer
# As written this uses a constant LR; lr_schedule is defined but not applied here
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)

model.compile(optimizer=opt,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
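Since the optimizer receives the constant initial_learning_rate, the lr_schedule defined above never takes effect, and the training run below therefore uses a fixed learning rate. To enable the decay, pass the schedule object instead. With staircase=True the LR drops in discrete jumps, following lr(step) = initial_learning_rate * decay_rate ** (step // decay_steps):

# Pass the schedule object (not the constant) to apply the decay
opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

# With staircase=True the LR falls by a factor of 0.92 every 30 steps:
for step in [0, 30, 60, 300]:
    print(step, initial_learning_rate * 0.92 ** (step // 30))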
3. Training the Model
epochs = 20

history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
Epoch 1/20
30/30 [==============================] - 1813s 60s/step - loss: 1.3844 - accuracy: 0.2635 - val_loss: 1.3721 - val_accuracy: 0.2208
Epoch 2/20
30/30 [==============================] - 1517s 51s/step - loss: 1.1250 - accuracy: 0.4042 - val_loss: 1.1268 - val_accuracy: 0.4792
Epoch 3/20
30/30 [==============================] - 1526s 51s/step - loss: 0.7898 - accuracy: 0.5875 - val_loss: 0.6452 - val_accuracy: 0.7333
Epoch 4/20
30/30 [==============================] - 1062s 35s/step - loss: 0.6062 - accuracy: 0.6760 - val_loss: 0.5274 - val_accuracy: 0.7792
Epoch 5/20
30/30 [==============================] - 1040s 35s/step - loss: 0.4209 - accuracy: 0.8010 - val_loss: 0.6662 - val_accuracy: 0.7917
Epoch 6/20
30/30 [==============================] - 1036s 35s/step - loss: 0.2564 - accuracy: 0.8927 - val_loss: 0.1194 - val_accuracy: 0.9583
Epoch 7/20
30/30 [==============================] - 936s 31s/step - loss: 0.2507 - accuracy: 0.9146 - val_loss: 0.1429 - val_accuracy: 0.9542
Epoch 8/20
30/30 [==============================] - 892s 30s/step - loss: 0.1098 - accuracy: 0.9594 - val_loss: 0.0890 - val_accuracy: 0.9625
Epoch 9/20
30/30 [==============================] - 912s 30s/step - loss: 0.0932 - accuracy: 0.9615 - val_loss: 0.0672 - val_accuracy: 0.9625
Epoch 10/20
30/30 [==============================] - 1102s 37s/step - loss: 0.0483 - accuracy: 0.9823 - val_loss: 0.0932 - val_accuracy: 0.9625
Epoch 11/20
30/30 [==============================] - 1394s 47s/step - loss: 0.0722 - accuracy: 0.9740 - val_loss: 0.1494 - val_accuracy: 0.9458
Epoch 12/20
30/30 [==============================] - 970s 32s/step - loss: 0.0269 - accuracy: 0.9917 - val_loss: 0.0528 - val_accuracy: 0.9875
Epoch 13/20
30/30 [==============================] - 922s 31s/step - loss: 0.0059 - accuracy: 0.9979 - val_loss: 0.1447 - val_accuracy: 0.9792
Epoch 14/20
30/30 [==============================] - 1347s 46s/step - loss: 0.0266 - accuracy: 0.9906 - val_loss: 0.1388 - val_accuracy: 0.9500
Epoch 15/20
30/30 [==============================] - 1397s 47s/step - loss: 0.0275 - accuracy: 0.9906 - val_loss: 0.0646 - val_accuracy: 0.9833
Epoch 16/20
30/30 [==============================] - 994s 33s/step - loss: 0.0384 - accuracy: 0.9875 - val_loss: 0.7649 - val_accuracy: 0.7417
Epoch 17/20
30/30 [==============================] - 899s 30s/step - loss: 0.2894 - accuracy: 0.9062 - val_loss: 0.1398 - val_accuracy: 0.9458
Epoch 18/20
30/30 [==============================] - 905s 30s/step - loss: 0.0691 - accuracy: 0.9729 - val_loss: 0.1766 - val_accuracy: 0.9458
Epoch 19/20
30/30 [==============================] - 915s 31s/step - loss: 0.0553 - accuracy: 0.9865 - val_loss: 0.0646 - val_accuracy: 0.9833
Epoch 20/20
30/30 [==============================] - ETA: 0s - loss: 0.0239 - accuracy: 0.9937
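The validation accuracy dips sharply around epochs 16-17 (down to 0.7417) before recovering, which is exactly the situation where checkpointing the best weights helps. ModelCheckpoint and EarlyStopping are imported at the top but never used in this run; a minimal sketch of wiring them in (the filename and patience value are illustrative choices, not from the original):

# Save the best weights and stop if val_accuracy stops improving
checkpointer = ModelCheckpoint('best_model.h5',  # illustrative path
                               monitor='val_accuracy',
                               save_best_only=True,
                               verbose=1)
earlystopper = EarlyStopping(monitor='val_accuracy',
                             patience=5,
                             restore_best_weights=True,
                             verbose=1)
history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs,
                    callbacks=[checkpointer, earlystopper])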
IV. Model Evaluation
1. Loss and Accuracy Curves
# Evaluate the model
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
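Beyond the curves, model.evaluate gives a single validation score. A short sketch (val_ds has already been normalized by the map above, so it can be passed directly):

# Final loss/accuracy on the validation set
val_loss_final, val_acc_final = model.evaluate(val_ds, verbose=0)
print(f"val_loss = {val_loss_final:.4f}, val_accuracy = {val_acc_final:.4f}")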