Traditional emotion detection is usually implemented with cascade classifiers; it can also be done by training a convolutional neural network. To get good predictions on images, we first need to train such a model.
1. Import the modules
from __future__ import print_function
import os
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
2. Set the number of emotion classes, the image size, the batch size, and the paths to the training and validation sets
num_classes = 5
img_rows, img_cols = 48, 48
batch_size = 32
train_data_dir = '../fer2013/train'
validation_data_dir = '../fer2013/validation'
3. To give the model better generalization, augment the training data with ImageDataGenerator. The validation set should reflect real inputs, so it gets no augmentation, only the same 1/255 rescaling as the training set. A quick preview sketch follows the code below.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=30,
    shear_range=0.3,
    zoom_range=0.3,
    width_shift_range=0.4,
    height_shift_range=0.4,
    horizontal_flip=True,
    fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
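To eyeball what the augmentation actually does, you can apply random transforms to a single sample. A minimal sketch, assuming a face crop exists at the (hypothetical) path below; note that random_transform applies only the geometric transforms, while the 1/255 rescaling happens later inside the generator:

from keras.preprocessing.image import load_img, img_to_array
sample = load_img('../fer2013/train/Happy/sample.jpg',  # hypothetical sample path
                  color_mode='grayscale', target_size=(48, 48))
x = img_to_array(sample)  # shape (48, 48, 1)
for _ in range(3):
    aug = train_datagen.random_transform(x)  # random rotation/shift/shear/zoom/flip
    print(aug.shape)                         # still (48, 48, 1)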
4. Create the data generators
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    color_mode='grayscale',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    color_mode='grayscale',
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)
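flow_from_directory assigns class indices in alphabetical order of the subdirectory names, and the class_labels list used at prediction time below must follow the same order. A quick sanity check (the mapping in the comment assumes the five FER2013 classes used here):

print(train_generator.class_indices)
# e.g. {'Angry': 0, 'Happy': 1, 'Neutral': 2, 'Sad': 3, 'Surprise': 4}
x_batch, y_batch = next(train_generator)
print(x_batch.shape, y_batch.shape)  # (32, 48, 48, 1) (32, 5)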
5. Build the model
model = Sequential()
#Block-1
model.add(Conv2D(32,(3,3),padding='same',kernel_initializer='he_normal',input_shape=(img_rows,img_cols,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
#Block-2
model.add(Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
#Block-3
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
#Block-4
model.add(Conv2D(256,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(256,(3,3),padding='same',kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
#Block-5
model.add(Flatten())
model.add(Dense(64,kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
#Block-6
model.add(Dense(64,kernel_initializer='he_normal'))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
#Block-7
model.add(Dense(num_classes,kernel_initializer='he_normal'))
model.add(Activation('softmax'))
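Before wiring up the callbacks, it is worth printing the architecture to confirm the output shapes and parameter counts:

model.summary()  # the final Dense layer's output shape should be (None, 5)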
6. Save the best model, stop training early, and reduce the learning rate on plateau
from keras.optimizers import RMSprop,SGD,Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
checkpoint = ModelCheckpoint('EmotionDetectionModel.h5',
monitor='val_loss',
mode='min',
save_best_only=True,
verbose=1)
earlystop = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=3,
verbose=1,
restore_best_weights=True
)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=3,
verbose=1,
min_delta=0.0001)
7. Compile and train the model
callbacks = [earlystop,checkpoint,reduce_lr]
model.compile(loss='categorical_crossentropy',
optimizer = Adam(lr=0.001),
metrics=['accuracy'])
nb_train_samples = 24176
nb_validation_samples = 3006
epochs=25
history=model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples//batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=validation_generator,
validation_steps=nb_validation_samples//batch_size)
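After training, plotting the learning curves helps spot over- or underfitting. A minimal sketch, assuming matplotlib is installed; older Keras releases store accuracy under the key 'acc' instead of 'accuracy', so we check both:

import matplotlib.pyplot as plt
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
plt.plot(history.history[acc_key], label='train acc')
plt.plot(history.history['val_' + acc_key], label='val acc')
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.legend()
plt.show()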
Next, we test the trained model in real time with OpenCV, using a Haar cascade to locate faces in the webcam feed.
from keras.models import load_model
from keras.preprocessing.image import img_to_array
import cv2
import numpy as np
face_classifier=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
classifier = load_model('EmotionDetectionModel.h5')
class_labels=['Angry','Happy','Neutral','Sad','Surprise']
cap=cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]  # crop the detected face region
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
        if np.sum([roi_gray]) != 0:
            roi = roi_gray.astype('float')/255.0  # convert to float and normalize to [0, 1]
            roi = img_to_array(roi)               # shape (48, 48, 1)
            roi = np.expand_dims(roi, axis=0)     # the model expects a batch axis: (1, 48, 48, 1)
            preds = classifier.predict(roi)[0]
            label = class_labels[preds.argmax()]
            label_position = (x, y)
            cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
        else:
            cv2.putText(frame, 'No Face Found', (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
    cv2.imshow('Emotion Detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()  # stop capturing video
cv2.destroyAllWindows()
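If no webcam is available, the same pipeline can be run on a still image. A minimal sketch; the file name is hypothetical:

frame = cv2.imread('test_face.jpg')  # hypothetical test image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for (x, y, w, h) in face_classifier.detectMultiScale(gray, 1.3, 5):
    roi = cv2.resize(gray[y:y+h, x:x+w], (48, 48)).astype('float') / 255.0
    roi = np.expand_dims(img_to_array(roi), axis=0)
    print(class_labels[classifier.predict(roi)[0].argmax()])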