This post uses OpenCV to read frames from a camera and Baidu's public body keypoint detection API to detect human keypoints in real time.
1. Obtaining the Baidu API credentials
To use the Baidu API, first register an account on the Baidu AI website and create an application under the body keypoint detection service. This gives you an APP_ID, API_KEY, and SECRET_KEY, which are what you need to call the API. The calling conventions are described in the API documentation: https://ai.baidu.com/ai-doc/BODY/0k3cpyxme
You also need to install Baidu's Python SDK:
pip install baidu-aip
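The code later in this post reads the three credentials from a module named cfg (the cfg.APP_ID / cfg.API_KEY / cfg.SECRET_KEY references). A minimal sketch of such a config file, with placeholder values you would replace with your own:

# cfg.py -- placeholder credentials; fill in the values from your Baidu AI console
APP_ID = 'your_app_id'
API_KEY = 'your_api_key'
SECRET_KEY = 'your_secret_key'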
2. Retrieving the body keypoints
The code that calls the Baidu API:
class BaiduAIP(object):
    def __init__(self):
        # cfg is a small config module holding APP_ID / API_KEY / SECRET_KEY (see the sketch above)
        self.client = AipBodyAnalysis(cfg.APP_ID, cfg.API_KEY, cfg.SECRET_KEY)

    def bodyAnalysis(self, img_jpg):
        retval, buffer = cv2.imencode('.jpg', img_jpg)  # encode the frame as a JPEG byte stream
        result = self.client.bodyAnalysis(buffer.tobytes())  # the SDK base64-encodes the image internally
        return result
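For reference, the result returned by bodyAnalysis is a dict. As the code below consumes it, each entry in person_info carries a body_parts dict (keypoint name mapped to x/y coordinates) and a location bounding box. A small sketch of reading it (test.jpg is a hypothetical local image):

result = BaiduAIP().bodyAnalysis(cv2.imread('test.jpg'))   # hypothetical local test image
for person in result.get('person_info', []):
    nose = person['body_parts']['nose']                    # each keypoint has 'x' and 'y'
    print('nose at', nose['x'], nose['y'])
    print('bounding box:', person['location'])             # 'left', 'top', 'width', 'height'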
3. Drawing the body keypoints
The functions that draw the keypoint markers, the skeleton lines, and the bounding box:
def draw_line(img, dic, text):
    color = (0, 255, 0)
    thickness = 2
    if text == 'warn':
        color = (0, 0, 255)
    # nose ---> neck
    cv2.line(img, (int(dic['nose']['x']), int(dic['nose']['y'])), (int(dic['neck']['x']), int(dic['neck']['y'])), color, thickness)
    # neck --> left_shoulder
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])), (int(dic['left_shoulder']['x']), int(dic['left_shoulder']['y'])), color, thickness)
    # neck --> right_shoulder
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])), (int(dic['right_shoulder']['x']), int(dic['right_shoulder']['y'])), color, thickness)
    # left_shoulder --> left_elbow
    cv2.line(img, (int(dic['left_shoulder']['x']), int(dic['left_shoulder']['y'])), (int(dic['left_elbow']['x']), int(dic['left_elbow']['y'])), color, thickness)
    # left_elbow --> left_wrist
    cv2.line(img, (int(dic['left_elbow']['x']), int(dic['left_elbow']['y'])), (int(dic['left_wrist']['x']), int(dic['left_wrist']['y'])), color, thickness)
    # right_shoulder --> right_elbow
    cv2.line(img, (int(dic['right_shoulder']['x']), int(dic['right_shoulder']['y'])), (int(dic['right_elbow']['x']), int(dic['right_elbow']['y'])), color, thickness)
    # right_elbow --> right_wrist
    cv2.line(img, (int(dic['right_elbow']['x']), int(dic['right_elbow']['y'])), (int(dic['right_wrist']['x']), int(dic['right_wrist']['y'])), color, thickness)
    # neck --> left_hip
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])), (int(dic['left_hip']['x']), int(dic['left_hip']['y'])), color, thickness)
    # neck --> right_hip
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])), (int(dic['right_hip']['x']), int(dic['right_hip']['y'])), color, thickness)
    # left_hip --> left_knee
    cv2.line(img, (int(dic['left_hip']['x']), int(dic['left_hip']['y'])), (int(dic['left_knee']['x']), int(dic['left_knee']['y'])), color, thickness)
    # right_hip --> right_knee
    cv2.line(img, (int(dic['right_hip']['x']), int(dic['right_hip']['y'])), (int(dic['right_knee']['x']), int(dic['right_knee']['y'])), color, thickness)
    # left_knee --> left_ankle
    cv2.line(img, (int(dic['left_knee']['x']), int(dic['left_knee']['y'])), (int(dic['left_ankle']['x']), int(dic['left_ankle']['y'])), color, thickness)
    # right_knee --> right_ankle
    cv2.line(img, (int(dic['right_knee']['x']), int(dic['right_knee']['y'])), (int(dic['right_ankle']['x']), int(dic['right_ankle']['y'])), color, thickness)

def draw_point(img, dic, text):
    color = (0, 255, 0)
    thickness = 2
    if text == 'warn':
        color = (0, 0, 255)
    for i in dic:
        cv2.circle(img, (int(dic[i]['x']), int(dic[i]['y'])), 5, color, thickness)

def draw_box(img, dic, text):
    color = (255, 0, 0)
    if text == 'warn':
        color = (0, 0, 255)
    left_top = (int(dic['left']), int(dic['top']))
    left_bottom = (int(dic['left']), int(dic['top'] + dic['height']))
    right_bottom = (int(dic['left'] + dic['width']), int(dic['top'] + dic['height']))
    right_top = (int(dic['left'] + dic['width']), int(dic['top']))
    cv2.line(img, left_top, left_bottom, color, 2)
    cv2.line(img, left_top, right_top, color, 2)
    cv2.line(img, right_bottom, left_bottom, color, 2)
    cv2.line(img, right_bottom, right_top, color, 2)
    cv2.putText(img, text, (int(dic['left']), int(dic['top']) + 20), cv2.FONT_HERSHEY_COMPLEX, 1, color, 1)
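draw_line repeats the same cv2.line call for each limb. An equivalent sketch that drives the same connections from a list of keypoint-name pairs keeps the skeleton definition in one place (same behaviour, just a more compact layout):

# Pairs of keypoint names to connect; mirrors the connections drawn in draw_line above.
SKELETON = [
    ('nose', 'neck'),
    ('neck', 'left_shoulder'), ('neck', 'right_shoulder'),
    ('left_shoulder', 'left_elbow'), ('left_elbow', 'left_wrist'),
    ('right_shoulder', 'right_elbow'), ('right_elbow', 'right_wrist'),
    ('neck', 'left_hip'), ('neck', 'right_hip'),
    ('left_hip', 'left_knee'), ('left_knee', 'left_ankle'),
    ('right_hip', 'right_knee'), ('right_knee', 'right_ankle'),
]

def draw_skeleton(img, dic, text):
    color = (0, 0, 255) if text == 'warn' else (0, 255, 0)
    for a, b in SKELETON:
        cv2.line(img, (int(dic[a]['x']), int(dic[a]['y'])),
                 (int(dic[b]['x']), int(dic[b]['y'])), color, 2)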
4. Capturing people from the camera with OpenCV
Open the camera and run the detection on the live feed:
if __name__ == '__main__':
    baiduapi = BaiduAIP()  # create the API client once, outside the capture loop
    cap = cv2.VideoCapture(0)
    # cv2.VideoCapture(0) opens a camera; 0 is the built-in webcam, 1 an external (USB) camera
    cap.set(3, 900)
    cap.set(4, 900)
    # cap.set() configures the capture: property 3 is the frame width, 4 is the frame height
    while True:
        # grab a frame
        ret, frame = cap.read()
        if not ret:
            break
        # detect the keypoints and draw them on the frame
        d = baiduapi.bodyAnalysis(frame)
        persons = d.get("person_info", [])
        for p in persons:
            draw_line(frame, p['body_parts'], 'ok')
            draw_point(frame, p['body_parts'], 'ok')
            draw_box(frame, p['location'], 'beauty')
        cv2.imshow("capture", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
5. Results
Result:
Test result from detecting a static image, without using the camera:
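The static-image test can be reproduced with the functions above and a local file; a minimal sketch (test.jpg and result.jpg are hypothetical file names):

img = cv2.imread('test.jpg')                 # hypothetical test image
res = BaiduAIP().bodyAnalysis(img)
for p in res.get('person_info', []):
    draw_line(img, p['body_parts'], 'ok')
    draw_point(img, p['body_parts'], 'ok')
    draw_box(img, p['location'], 'beauty')
cv2.imwrite('result.jpg', img)               # save the annotated output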
6. Complete program
from aip import AipBodyAnalysis
import cv2
import time
import cfg  # small config module holding APP_ID / API_KEY / SECRET_KEY

class BaiduAIP(object):
    def __init__(self):
        self.client = AipBodyAnalysis(cfg.APP_ID, cfg.API_KEY, cfg.SECRET_KEY)

    def bodyAnalysis(self, img_jpg):
        retval, buffer = cv2.imencode('.jpg', img_jpg)  # encode the frame as a JPEG byte stream
        result = self.client.bodyAnalysis(buffer.tobytes())  # the SDK base64-encodes the image internally
        return result
def draw_line(img, dic, text):
    color = (0, 255, 0)
    thickness = 2
    if text == 'warn':
        color = (0, 0, 255)
    # nose ---> neck
    cv2.line(img, (int(dic['nose']['x']), int(dic['nose']['y'])),
             (int(dic['neck']['x']), int(dic['neck']['y'])), color, thickness)
    # neck --> left_shoulder
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])),
             (int(dic['left_shoulder']['x']), int(dic['left_shoulder']['y'])), color, thickness)
    # neck --> right_shoulder
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])),
             (int(dic['right_shoulder']['x']), int(dic['right_shoulder']['y'])), color, thickness)
    # left_shoulder --> left_elbow
    cv2.line(img, (int(dic['left_shoulder']['x']), int(dic['left_shoulder']['y'])),
             (int(dic['left_elbow']['x']), int(dic['left_elbow']['y'])), color, thickness)
    # left_elbow --> left_wrist
    cv2.line(img, (int(dic['left_elbow']['x']), int(dic['left_elbow']['y'])),
             (int(dic['left_wrist']['x']), int(dic['left_wrist']['y'])), color, thickness)
    # right_shoulder --> right_elbow
    cv2.line(img, (int(dic['right_shoulder']['x']), int(dic['right_shoulder']['y'])),
             (int(dic['right_elbow']['x']), int(dic['right_elbow']['y'])), color, thickness)
    # right_elbow --> right_wrist
    cv2.line(img, (int(dic['right_elbow']['x']), int(dic['right_elbow']['y'])),
             (int(dic['right_wrist']['x']), int(dic['right_wrist']['y'])), color, thickness)
    # neck --> left_hip
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])),
             (int(dic['left_hip']['x']), int(dic['left_hip']['y'])), color, thickness)
    # neck --> right_hip
    cv2.line(img, (int(dic['neck']['x']), int(dic['neck']['y'])),
             (int(dic['right_hip']['x']), int(dic['right_hip']['y'])), color, thickness)
    # left_hip --> left_knee
    cv2.line(img, (int(dic['left_hip']['x']), int(dic['left_hip']['y'])),
             (int(dic['left_knee']['x']), int(dic['left_knee']['y'])), color, thickness)
    # right_hip --> right_knee
    cv2.line(img, (int(dic['right_hip']['x']), int(dic['right_hip']['y'])),
             (int(dic['right_knee']['x']), int(dic['right_knee']['y'])), color, thickness)
    # left_knee --> left_ankle
    cv2.line(img, (int(dic['left_knee']['x']), int(dic['left_knee']['y'])),
             (int(dic['left_ankle']['x']), int(dic['left_ankle']['y'])), color, thickness)
    # right_knee --> right_ankle
    cv2.line(img, (int(dic['right_knee']['x']), int(dic['right_knee']['y'])),
             (int(dic['right_ankle']['x']), int(dic['right_ankle']['y'])), color, thickness)
def draw_point(img, dic, text):
    color = (0, 255, 0)
    thickness = 2
    if text == 'warn':
        color = (0, 0, 255)
    for i in dic:
        cv2.circle(img, (int(dic[i]['x']), int(dic[i]['y'])), 5, color, thickness)
def draw_box(img, dic, text):
    color = (255, 0, 0)
    if text == 'warn':
        color = (0, 0, 255)
    left_top = (int(dic['left']), int(dic['top']))
    left_bottom = (int(dic['left']), int(dic['top'] + dic['height']))
    right_bottom = (int(dic['left'] + dic['width']), int(dic['top'] + dic['height']))
    right_top = (int(dic['left'] + dic['width']), int(dic['top']))
    cv2.line(img, left_top, left_bottom, color, 2)
    cv2.line(img, left_top, right_top, color, 2)
    cv2.line(img, right_bottom, left_bottom, color, 2)
    cv2.line(img, right_bottom, right_top, color, 2)
    cv2.putText(img, text, (int(dic['left']), int(dic['top']) + 20), cv2.FONT_HERSHEY_COMPLEX, 1, color, 1)
if __name__ == '__main__':
    baiduapi = BaiduAIP()  # create the API client once, outside the capture loop
    cap = cv2.VideoCapture(0)
    # cv2.VideoCapture(0) opens a camera; 0 is the built-in webcam, 1 an external (USB) camera
    cap.set(3, 900)
    cap.set(4, 900)
    # cap.set() configures the capture: property 3 is the frame width, 4 is the frame height
    while True:
        # grab a frame
        ret, frame = cap.read()
        if not ret:
            break
        # detect the keypoints and draw them on the frame
        d = baiduapi.bodyAnalysis(frame)
        persons = d.get("person_info", [])
        for p in persons:
            draw_line(frame, p['body_parts'], 'ok')
            draw_point(frame, p['body_parts'], 'ok')
            draw_box(frame, p['location'], 'beauty')
        cv2.imshow("capture", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Detecting body keypoints from a live camera stream this way places fairly high demands on the computer and the network connection; on a lower-spec machine the program may run with noticeable lag.
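One way to reduce the lag is to avoid sending every frame to the API: run detection on every N-th frame and reuse the last result for the frames in between. A rough sketch of that change to the main loop (N = 10 is an arbitrary choice, not something from the original code):

frame_idx = 0
last_persons = []
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if frame_idx % 10 == 0:                  # call the API only on every 10th frame
        d = baiduapi.bodyAnalysis(frame)
        last_persons = d.get('person_info', [])
    for p in last_persons:                   # reuse the most recent detections in between
        draw_line(frame, p['body_parts'], 'ok')
        draw_point(frame, p['body_parts'], 'ok')
        draw_box(frame, p['location'], 'beauty')
    cv2.imshow('capture', frame)
    frame_idx += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break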
7. Referenced blog
https://blog.csdn.net/qq_37394634/article/details/105494672