本人使用下面的代码实现了图片分类模型的训练,使用了3000张9个类别的图片,在测试集上准确率90%左右。我使用的显卡是gt1030,是的,只有2gb显存的gt1030,它能够运行和完成任务。
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers,regularizers
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import datetime
import json
# Timestamp used to tag checkpoint paths so separate runs do not overwrite
# each other.
now = datetime.datetime.now()
timestamp = now.strftime("%Y%m%d_%H%M%S")
# Path settings.
# Training images live in class-named subdirectories under train_dir.
train_dir = 'D:\\机器学习\\数据集\\照片分类数据集'
model_h5_save_path = "./trainresult/trainedmodels/model.h5"
model_pb_save_path = "./trainresult/trainedmodels/modelpb"
# Checkpoint path templates: the {epoch}/{val_loss}/{val_accuracy} fields are
# filled in by ModelCheckpoint at save time; the timestamp part is fixed now.
# NOTE(review): "autoave"/"valaccury" are typos for "autosave"/"val_accuracy";
# left unchanged because the variable and the on-disk names are referenced
# further down and in existing result directories.
autoave_best_model_pb_path = f"./trainresult/trainedmodels/bestmodel_{timestamp}"+"_epoch_{epoch:02d}_valloss_{val_loss:.2f}_valaccury_{val_accuracy:.2f}"
autoave_best_model_h5_path = f"./trainresult/trainedmodels/bestmodel_{timestamp}"+"_epoch_{epoch:02d}_valloss_{val_loss:.2f}_valaccury_{val_accuracy:.2f}.h5"
json_save_path = './trainresult/class_indices.json'
# Input image size (pixels) fed to the network.
pic_width = 224
pic_height = 224
# Data pipeline: a single ImageDataGenerator serves both subsets via
# validation_split.
# NOTE(review): with one shared generator, the shear/zoom/shift/flip
# augmentations are configured for the validation subset as well — a separate
# augmentation-free generator for validation is usually preferable; confirm
# against the Keras version in use.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    width_shift_range=0.2,
    height_shift_range = 0.2,
    validation_split=0.2, # 20% of the images are held out for validation
)
# Training subset (directory names become the class labels).
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(pic_height,pic_width),
    batch_size=16, # small batch: slower training but lower memory pressure (2GB GPU)
    class_mode='categorical',
    subset='training', # the training split
    shuffle=True,
)
# Validation subset.
valid_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(pic_height, pic_width),
    batch_size=16,
    class_mode='categorical',
    subset='validation', # the validation split
    shuffle=True,
)
# CNN: six convolutional stages (64 -> 64 -> 64 -> 128 -> 128 -> 256 filters)
# followed by a fully connected head ending in a softmax over the classes.
model = tf.keras.models.Sequential([
    # Stage 1 — no batch norm here; 2x2 max pool.
    layers.Conv2D(64, (3, 3), input_shape=(pic_height, pic_width, 3), padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
    # Stage 2.
    layers.Conv2D(64, (3, 3), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    # Stage 3, then pool.
    layers.Conv2D(64, (3, 3), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
    # Stage 4 — stride-2 convolution halves the spatial size.
    layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    # Stage 5 — stride-2 convolution, then stride-2 pool.
    layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    # Stage 6 — stride-2 convolution, pool, then dropout for regularisation.
    layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    layers.Dropout(0.5),
    # Dense head.
    layers.Flatten(),
    layers.Dense(256),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    # L2 weight decay on this layer only, with dropout afterwards.
    layers.Dense(256, kernel_regularizer=regularizers.l2(0.001)),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dropout(0.5),
    layers.Dense(128),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    layers.Dense(128),
    layers.BatchNormalization(),
    layers.Activation('relu'),
    # Output: one probability per class.
    layers.Dense(train_generator.num_classes, activation='softmax'),
])
# Print the layer-by-layer summary.
model.summary()
# Compile with categorical cross-entropy to match the one-hot labels produced
# by class_mode='categorical'.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Early stopping.
# Fix: monitor *validation* accuracy rather than training accuracy — training
# accuracy keeps improving almost monotonically, so monitoring it would in
# practice never stop an overfitting run.
early_stopping_callback = EarlyStopping(
    monitor='val_accuracy',
    patience=20,
    mode='auto'
)
# Automatically keep the best model seen so far (by validation accuracy).
checkpoint_callback = ModelCheckpoint(
    filepath=autoave_best_model_pb_path, # SavedModel directory per checkpoint
    verbose=0, # 0 = silent, 1 = log each save
    save_best_only=True, # only overwrite when the monitored metric improves
    monitor='val_accuracy', # metric to monitor
    mode='max', # larger accuracy is better
    save_freq='epoch', # check once per epoch (replaces the deprecated period=1)
)
# Train; the validation metrics drive the two callbacks above.
model.fit(
    train_generator,
    #steps_per_epoch=1000,
    epochs=300,
    validation_data=valid_generator,
    #validation_steps=50,
    callbacks=[early_stopping_callback,checkpoint_callback]
)
# Save the final model (independently of the per-epoch best checkpoints).
# HDF5 format:
model.save(model_h5_save_path)
# SavedModel format — any of the following is equivalent:
#model.save(model_pb_save_path,save_format="tf")
#tf.keras.models.save_model(model,model_pb_save_path)
tf.saved_model.save(model, model_pb_save_path)
# Persist the class-name -> index mapping produced by the generator so that
# inference code can translate prediction indices back to labels.
class_indices = train_generator.class_indices
# Write the mapping out as JSON.
with open(json_save_path, 'w') as f:
    json.dump(class_indices, f)
导出的一些模型结构
D:.
└─trainedmodels
├─bestmodel_20231201_035144_epoch_241_valloss_0.44_valaccury_0.90
│ ├─assets
│ └─variables
└─modelpb
├─assets
└─variables
使用模型进行预测的代码
import tensorflow as tf
import numpy as np
# Input size must match what the model was trained on (224x224 RGB).
pic_width = 224
pic_height = 224
predict_path = "D:/机器学习/数据集/照片分类测试/594512936a9e5c0f98e211a7d2af7e28.jpg"
model_path = "./model.h5"
json_path = './class_indices.json'
# Read and decode the image as a 3-channel (RGB) tensor.
image = tf.io.read_file(predict_path)
image = tf.image.decode_jpeg(image, channels=3)
# Resize to the training resolution; tf.image.resize returns float32, which
# is what makes the in-place division below valid.
image = tf.image.resize(image, [pic_height, pic_width])
# Normalise to [0, 1], matching the rescale=1./255 used during training.
image /= 255.0
# Add the batch dimension: (H, W, 3) -> (1, H, W, 3).
# Fix: previously the tensor was rebuilt via np.array(list(image)) and then
# reshaped with (1, pic_width, pic_height, 3) — a silent height/width swap
# that only worked because both are 224. expand_dims preserves the layout.
itt = np.expand_dims(image.numpy(), axis=0)
# Load the trained model.
from keras.models import load_model
model = load_model(model_path)
# Run inference on the single-image batch.
prediction = model.predict(itt)
# Report the prediction.
import json
# Load the class-name -> index mapping that was saved at training time; the
# key order mirrors the output units of the softmax layer.
with open(json_path, 'r') as fp:
    class_indices = json.load(fp)
class_names = list(class_indices.keys())
# Show the probability assigned to every class.
print('\n各个分类的概率:')
for name, prob in zip(class_names, prediction[0]):
    print('{} {:.4f}'.format(name, prob))
# Below this confidence the sample is reported as "other".
threshold = 0.5
if any(np.max(row) < threshold for row in prediction):
    # No class reached the confidence threshold.
    print("预测结果:其它类别")
else:
    # The most likely class cleared the threshold — report its name.
    predicted_class = np.argmax(prediction)
    print('预测结果:{}'.format(class_names[predicted_class]))
其中可以使用saved_model_cli.py来查看输入输出层的名字信息
1. 使用方法
python saved_model_cli.py
usage: saved_model_cli.py [-h] [-v]
{show,run,scan,convert,aot_compile_cpu} ...
saved_model_cli.py: error: too few arguments
2.指定savedmodel目录
python saved_model_cli.py show --dir D:\modelpb
The given SavedModel contains the following tag-sets:
'serve'
3.查看tag签名
python saved_model_cli.py show --dir D:\modelpb --tag_set serve
The given SavedModel MetaGraphDef contains SignatureDefs with the following keys:
SignatureDef key: "__saved_model_init_op"
SignatureDef key: "serving_default"
4.查看输入输出层名字
python saved_model_cli.py show --dir D:\modelpb --tag_set serve --signature_def serving_default
The given SavedModel SignatureDef contains the following input(s):
inputs['conv2d_input'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 224, 224, 3)
name: serving_default_conv2d_input:0
The given SavedModel SignatureDef contains the following output(s):
outputs['dense_1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 9)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
即输入和输出张量的名字分别是 serving_default_conv2d_input 和 StatefulPartitionedCall。
我使用ResNet50进行分类训练的代码准确率也差不多
import tensorflow as tf
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import regularizers
from keras.callbacks import EarlyStopping
# Dataset location and input image size.
train_data_dir = 'D:\\机器学习\\数据集\\照片分类数据集'
img_width, img_height = 224, 224
model_pb_save_path = "./modelpb"
model_save_path = "./modelresnet50.h5"
# Augmentation pipeline; validation_split reserves 20% for validation.
# NOTE(review): ImageNet-pretrained ResNet50 conventionally expects
# keras.applications.resnet50.preprocess_input rather than a plain 1/255
# rescale — worth benchmarking, although 1/255 evidently trains to ~90% here.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2,
)
# Training subset (directory names become the class labels).
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height,img_width),
    batch_size=32,
    class_mode='categorical',
    subset='training',
    shuffle=True,
)
# Validation subset.
validation_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height,img_width),
    batch_size=32,
    class_mode='categorical',
    subset='validation',
    shuffle=True,
)
# Load ResNet50 pretrained on ImageNet, without its classification head.
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))
# Freeze the backbone so only the new head is trained (the trainable flags
# take effect when the model is compiled below).
for layer in base_model.layers:
    layer.trainable = False
# New classification head: global average pooling, two dense layers, and a
# softmax over the dataset's classes.
features = GlobalAveragePooling2D()(base_model.output)
features = Dense(256, activation='relu')(features)
features = Dense(128, activation='relu')(features)
predictions = Dense(train_generator.num_classes, activation='softmax')(features)
# Assemble and compile the full model.
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Print the architecture summary.
model.summary()
# Early stopping: give up after 10 epochs without validation-loss improvement.
early_stopping_callback = EarlyStopping(
    monitor='val_loss',
    patience=10,
    mode='auto'
)
# Train the classification head (backbone frozen above), then save.
model.fit(
    train_generator,
    #steps_per_epoch=2000,
    epochs=100,
    validation_data=validation_generator,
    #validation_steps=800,
    callbacks=[early_stopping_callback]
)
# Save in HDF5 format.
model.save(model_save_path)
# Save in SavedModel (pb) format.
tf.saved_model.save(model, model_pb_save_path)
# Persist the class-name -> index mapping produced by the generator so that
# inference code can translate prediction indices back to labels.
import json
class_indices = train_generator.class_indices
with open('class_indices.json', 'w') as handle:
    handle.write(json.dumps(class_indices))