CV: Real-time facial expression detection with Keras and cv2's built-in two-step detection, applied to the "Ruhua" clip from Running Man Season 6 Episode 5 (or to a live webcam feed)

Output

(Output screenshot: real-time facial expression detection on the Ruhua clip, with a colored bounding box and the smoothed emotion label drawn on each detected face.)


Design approach

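The pipeline runs in two steps: OpenCV's built-in Haar cascade finds face rectangles in each grayscale frame, and the mini_XCEPTION model trained on FER2013 classifies every cropped face into one of seven emotions; the per-frame labels are then smoothed by a majority vote over the last few frames and drawn back onto the frame. Below is a minimal sketch of just those two steps, without the project's helper wrappers; the test image name and the label ordering are illustrative assumptions, and the pixel scaling mirrors what preprocess_input does in the full listing.

# Two-step sketch: Haar-cascade detection, then mini_XCEPTION classification.
# The model paths match the listing below; 'some_frame.jpg' is a hypothetical input file.
import cv2
import numpy as np
from keras.models import load_model

face_cascade = cv2.CascadeClassifier('../trained_models/detection_models/haarcascade_frontalface_default.xml')
emotion_model = load_model('../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5', compile=False)
fer2013_labels = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')  # assumed FER2013 order

frame = cv2.imread('some_frame.jpg')                         # any BGR frame (hypothetical file)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

# Step 1: detect face rectangles on the grayscale frame
for (x, y, w, h) in face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5):
    face = cv2.resize(gray[y:y + h, x:x + w], emotion_model.input_shape[1:3])
    face = (face / 255.0 - 0.5) * 2.0                        # same scaling as preprocess_input(..., True)
    face = face[np.newaxis, :, :, np.newaxis]                # shape (1, H, W, 1)
    # Step 2: classify the cropped face
    scores = emotion_model.predict(face)[0]
    print(fer2013_labels[int(np.argmax(scores))], float(np.max(scores)))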



Core code

#CV: Real-time facial expression detection with Keras and cv2's built-in two-step approach
#    (Haar-cascade face detection + mini_XCEPTION emotion classification), applied to the
#    "Ruhua" clip from Running Man Season 6 Episode 5, or to a live webcam feed. -- Jason Niu
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode                       # majority vote over recent frames

# Helper functions used below; they come from the face_classification project's utils package
# (assumed project layout, matching the trained_models paths used here).
from utils.datasets import get_labels
from utils.inference import detect_faces, draw_text, draw_bounding_box, apply_offsets, load_detection_model
from utils.preprocessor import preprocess_input

detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
emotion_labels = get_labels('fer2013')            # FER2013 classes: angry, disgust, fear, happy, sad, surprise, neutral

frame_window = 10                                 # how many recent predictions the label smoothing uses
emotion_offsets = (20, 40)                        # (x, y) margins added around each detected face box

face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
emotion_target_size = emotion_classifier.input_shape[1:3]   # input size expected by the emotion model

emotion_window = []

# Create the display window once; WINDOW_NORMAL lets the resize below take effect
cv2.namedWindow('window_frame,by Jason Niu', cv2.WINDOW_NORMAL)
cv2.resizeWindow('window_frame,by Jason Niu', 640, 380)

# video_capture = cv2.VideoCapture(0)             # 0 selects the first camera, usually the laptop's built-in webcam
video_capture = cv2.VideoCapture(r"F:\File_Python\Python_example\YOLOv3_use_TF\RunMan5.mp4")  # raw string keeps the backslashes literal

while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)          # step 1: Haar-cascade face detection

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]                  # crop the face region from the grayscale frame
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except:
            continue

        gray_face = preprocess_input(gray_face, True)         # scale pixel values for the classifier
        gray_face = np.expand_dims(gray_face, 0)              # add batch dimension
        gray_face = np.expand_dims(gray_face, -1)             # add channel dimension
        emotion_prediction = emotion_classifier.predict(gray_face)   # step 2: emotion classification
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)
        if len(emotion_window) > frame_window:
            emotion_window.pop(0)                             # keep only the last frame_window labels
        try:
            emotion_mode = mode(emotion_window)               # most frequent label in the window
        except:
            continue

        # Choose the drawing color by emotion, scaled by the prediction confidence
        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))      # red
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))      # blue
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))    # yellow
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))    # cyan
        else:
            color = emotion_probability * np.asarray((0, 255, 0))      # green
        color = color.astype(int)
        color = color.tolist()

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode,
                  color, 0, -45, 1, 4)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imshow('window_frame,by Jason Niu', bgr_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):                     # press 'q' to quit
        break
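The loop exits when 'q' is pressed but leaves the capture and the window open. As a small addition that is not part of the original listing, releasing them after the loop frees the video file or camera handle cleanly:

video_capture.release()        # release the video file / camera handle
cv2.destroyAllWindows()        # close the OpenCV display window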

