Demo: (screenshot of the finished app omitted)
Step 1: Install the Face client library and the window-related libraries
Add the following using directives:
// Face client library
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Threading.Tasks;
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;
// Window-related libraries
using System.Windows;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
Step 2: Declare and initialize the relevant variables
Create variables for your resource's key and endpoint:
// Add your own subscription key.
private static string subscriptionKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxx";
// Add your endpoint.
private static string faceEndpoint = "https://cnsoft.cognitiveservices.azure.cn/";
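Hardcoding the key works for a quick demo, but it is safer to read secrets from configuration. A minimal sketch that pulls both values from environment variables (the names FACE_SUBSCRIPTION_KEY and FACE_ENDPOINT are assumptions, not part of this project):
// Hypothetical environment-variable names; adjust them to your own setup.
private static string subscriptionKey =
    Environment.GetEnvironmentVariable("FACE_SUBSCRIPTION_KEY");
private static string faceEndpoint =
    Environment.GetEnvironmentVariable("FACE_ENDPOINT");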
Construct the client:
private readonly IFaceClient faceClient = new FaceClient(
new ApiKeyServiceClientCredentials(subscriptionKey),
new System.Net.Http.DelegatingHandler[] { }
);
FaceClient: This class represents your authorization to use the Face service, and all Face functionality requires it. Instantiate it with your subscription information, then use it to create instances of other classes.
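For instance, the detection call used later in this tutorial is reached through the client's Face operation group; a minimal sketch (imageStream is a placeholder):
// Detection (step 6) goes through the client's Face property.
IList<DetectedFace> faces = await faceClient.Face.DetectWithStreamAsync(
    imageStream, true, false, null);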
Image-related variables:
// The list of detected faces.
private IList<DetectedFace> faceList;
// The array of description strings for the detected faces.
private string[] faceDescriptions;
// The resize factor for the displayed image.
private double resizeFactor;
DetectedFace: This class represents all of the data detected for a single face in an image. Use it to retrieve detailed information about the face.
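A rough sketch of what you can read off a DetectedFace, matching how it is used later in this tutorial:
// Reading data back from a detected face (sketch).
DetectedFace face = faceList[0];
FaceRectangle rect = face.FaceRectangle;    // Left, Top, Width, Height in pixels.
double? smile = face.FaceAttributes.Smile;  // 0..1; present only if requested.
Guid? faceId = face.FaceId;                 // Returned because returnFaceId is true.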
Status bar text:
private const string defaultStatusBarText =
"Place the mouse pointer over a face to see the face description.";
Step 3: The MainWindow constructor
public MainWindow()
{
// Initialize the window components.
InitializeComponent();
// If the string is a well-formed URI, assign it to the Face client's endpoint.
if (Uri.IsWellFormedUriString(faceEndpoint, UriKind.Absolute))
{
faceClient.Endpoint = faceEndpoint;
}
else // Otherwise, display an error message.
{
MessageBox.Show(faceEndpoint,
"Invalid URI", MessageBoxButton.OK, MessageBoxImage.Error);
Environment.Exit(0);
}
}
Uri.IsWellFormedUriString(String, UriKind): This method indicates whether a string is well-formed by attempting to construct a URI from it, and verifies that the string does not require further escaping.
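For example:
// True: an absolute, well-formed URI.
bool ok = Uri.IsWellFormedUriString(
    "https://cnsoft.cognitiveservices.azure.cn/", UriKind.Absolute);
// False: the spaces make this ill-formed as an absolute URI.
bool bad = Uri.IsWellFormedUriString("not a valid endpoint", UriKind.Absolute);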
Step 4: The BrowseButton_Click method: handles the button click, displays the selected image, calls UploadAndDetectFaces, and draws rectangles around the detected faces
private async void BrowseButton_Click(object sender, RoutedEventArgs e)
{
// Get the image file to scan from the user.
var openDlg = new Microsoft.Win32.OpenFileDialog();
// Restrict the dialog to JPEG images.
openDlg.Filter = "JPEG Image(*.jpg)|*.jpg";
// Show the dialog and check whether a file was selected.
bool? result = openDlg.ShowDialog(this);
bool? result = openDlg.ShowDialog(this);
// Return if canceled.
if (!(bool)result)
{
return;
}
// Display the image file.
string filePath = openDlg.FileName;
Uri fileUri = new Uri(filePath);
BitmapImage bitmapSource = new BitmapImage();
bitmapSource.BeginInit();
bitmapSource.CacheOption = BitmapCacheOption.None;
bitmapSource.UriSource = fileUri;
bitmapSource.EndInit();
FacePhoto.Source = bitmapSource;
// Detect any faces in the image.
Title = "Detecting...";
faceList = await UploadAndDetectFaces(filePath); // Upload and detect.
Title = String.Format(
"Detection Finished. {0} face(s) detected", faceList.Count);
if (faceList.Count > 0)
{
// Prepare to draw rectangles around the faces.
DrawingVisual visual = new DrawingVisual();
DrawingContext drawingContext = visual.RenderOpen();
drawingContext.DrawImage(bitmapSource,
new Rect(0, 0, bitmapSource.Width, bitmapSource.Height));
double dpi = bitmapSource.DpiX;
// Some images don't contain dpi info.
resizeFactor = (dpi == 0) ? 1 : 96 / dpi;
faceDescriptions = new String[faceList.Count];
for (int i = 0; i < faceList.Count; ++i)
{
DetectedFace face = faceList[i];
// Draw a rectangle on the face.
drawingContext.DrawRectangle(
Brushes.Transparent,
new Pen(Brushes.Red, 2),
new Rect(
face.FaceRectangle.Left * resizeFactor,
face.FaceRectangle.Top * resizeFactor,
face.FaceRectangle.Width * resizeFactor,
face.FaceRectangle.Height * resizeFactor
)
);
// Store the face description.
faceDescriptions[i] = FaceDescription(face);
}
if (faceList.Count > 0)
{
TheEmotion.Text = "Copy all of the information below:\n" + faceDescriptions[0];
}
drawingContext.Close();
// Display the image with the rectangles around the faces.
RenderTargetBitmap faceWithRectBitmap = new RenderTargetBitmap(
(int)(bitmapSource.PixelWidth * resizeFactor),
(int)(bitmapSource.PixelHeight * resizeFactor),
96,
96,
PixelFormats.Pbgra32);
faceWithRectBitmap.Render(visual);
FacePhoto.Source = faceWithRectBitmap;
// Set the status bar text.
faceDescriptionStatusBar.Text = defaultStatusBarText;
}
}
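To make the dpi arithmetic concrete: WPF measures images in device-independent units of 1/96 inch, so bitmapSource.Width equals PixelWidth * 96 / DpiX. A 600-pixel-wide photo stored at 72 dpi is therefore drawn 800 units wide, and resizeFactor = 96 / 72 ≈ 1.33 scales the pixel-based FaceRectangle coordinates into that same drawing space.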
Step 5: The FacePhoto_MouseMove method: displays the face description when the mouse moves over a face rectangle
private void FacePhoto_MouseMove(object sender, MouseEventArgs e)
{
// If the REST call has not completed, return.
if (faceList == null)
return;
// Find the mouse position relative to the image.
Point mouseXY = e.GetPosition(FacePhoto);
ImageSource imageSource = FacePhoto.Source;
BitmapSource bitmapSource = (BitmapSource)imageSource;
// Scale adjustment between the actual size and the displayed size.
var scale = FacePhoto.ActualWidth / (bitmapSource.PixelWidth / resizeFactor);
// Check if this mouse position is over a face rectangle.
bool mouseOverFace = false;
for (int i = 0; i < faceList.Count; ++i)
{
FaceRectangle fr = faceList[i].FaceRectangle;
double left = fr.Left * scale;
double top = fr.Top * scale;
double width = fr.Width * scale;
double height = fr.Height * scale;
// Display the face description if the mouse is over this face rectangle.
if (mouseXY.X >= left && mouseXY.X <= left + width &&
mouseXY.Y >= top && mouseXY.Y <= top + height)
{
faceDescriptionStatusBar.Text = faceDescriptions[i];
mouseOverFace = true;
break;
}
}
// Otherwise, show the default status bar text.
if (!mouseOverFace) faceDescriptionStatusBar.Text = defaultStatusBarText;
}
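As a worked example of the hit-testing math: if the bitmap is 1280 pixels wide with a resizeFactor of 1 and the Image control is displayed at an ActualWidth of 640, then scale = 640 / (1280 / 1) = 0.5, so a FaceRectangle with Left = 200 and Width = 100 covers on-screen x positions 100 through 150.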
Step 6: The UploadAndDetectFaces method: uploads the image and calls DetectWithStreamAsync
private async Task<IList<DetectedFace>> UploadAndDetectFaces(string imageFilePath)
{
// The list of Face attributes to return.
IList<FaceAttributeType?> faceAttributes =
new FaceAttributeType?[]
{
FaceAttributeType.Gender, FaceAttributeType.Age,
FaceAttributeType.Smile, FaceAttributeType.Emotion,
FaceAttributeType.Glasses, FaceAttributeType.Hair
};
// Call the Face API.
try
{
using (Stream imageFileStream = File.OpenRead(imageFilePath))
{
// The second argument specifies to return the faceId, while
// the third argument specifies not to return face landmarks.
IList<DetectedFace> faceList =
await faceClient.Face.DetectWithStreamAsync(
imageFileStream, true, false, faceAttributes);
return faceList;
}
}
// Catch and display Face API errors.
catch (APIErrorException f)
{
MessageBox.Show(f.Message);
return new List<DetectedFace>();
}
// Catch and display all other errors.
catch (Exception e)
{
MessageBox.Show(e.Message, "Error");
return new List<DetectedFace>();
}
}
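If the image lives at a public URL rather than on disk, the same SDK also exposes DetectWithUrlAsync with the same argument pattern; a minimal sketch under that assumption (DetectFromUrl and imageUrl are illustrative names, not part of the original project):
// Sketch: detect faces in a remote image instead of a local file.
private async Task<IList<DetectedFace>> DetectFromUrl(
    string imageUrl, IList<FaceAttributeType?> faceAttributes)
{
    try
    {
        return await faceClient.Face.DetectWithUrlAsync(
            imageUrl, true, false, faceAttributes);
    }
    catch (APIErrorException e)
    {
        MessageBox.Show(e.Message);
        return new List<DetectedFace>();
    }
}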
Step 7: The FaceDescription method: builds the description string for a face
private string FaceDescription(DetectedFace face)
{
StringBuilder sb = new StringBuilder();
sb.Append("Face: ");
// Add the gender, age, and smile.
sb.Append(face.FaceAttributes.Gender);
sb.Append(", ");
sb.Append(face.FaceAttributes.Age);
sb.Append(", ");
sb.Append(String.Format("smile {0:F1}% ", face.FaceAttributes.Smile * 100));
sb.Append("\n");
// Add all of the emotion scores.
Emotion emotionScores = face.FaceAttributes.Emotion;
sb.Append("anger: "+ emotionScores.Anger + "\n");
sb.Append("contempt: " + emotionScores.Contempt + "\n");
sb.Append("disgust: " + emotionScores.Disgust + "\n");
sb.Append("fear: " + emotionScores.Fear + "\n");
sb.Append("happiness: " + emotionScores.Happiness + "\n");
sb.Append("neutral: " + emotionScores.Neutral + "\n");
sb.Append("sadness: " + emotionScores.Sadness + "\n");
sb.Append("surprise: " + emotionScores.Surprise + "\n");
// Add glasses.
sb.Append("ifhasGlasses: "+face.FaceAttributes.Glasses);
sb.Append("\n ");
// Add hair.
sb.Append("Hair: ");
// Display baldness confidence if over 1%.
if (face.FaceAttributes.Hair.Bald >= 0.01f)
sb.Append(String.Format("bald {0:F1}% ", face.FaceAttributes.Hair.Bald * 100));
// Display all hair color attributes over 10%.
IList<HairColor> hairColors = face.FaceAttributes.Hair.HairColor;
foreach (HairColor hairColor in hairColors)
{
if (hairColor.Confidence >= 0.1f)
{
sb.Append(hairColor.Color.ToString());
sb.Append(String.Format(" {0:F1}% ", hairColor.Confidence * 100));
}
}
// Return the built string.
return sb.ToString();
}
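As a quick sanity check of the formatting: String.Format("smile {0:F1}% ", 0.876 * 100) yields "smile 87.6% ", because Smile is a confidence between 0 and 1 and the F1 specifier rounds to one decimal place.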
Full code
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
// using System.Windows.Controls;
// using System.Windows.Data;
// using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
// using System.Windows.Navigation;
// using System.Windows.Shapes;
using Microsoft.Azure.CognitiveServices.Vision.Face;
using Microsoft.Azure.CognitiveServices.Vision.Face.Models;
namespace C14_WPF_EmotionAcuiring
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
// <snippet_mainwindow_fields>
// Add your own Face subscription key.
private static string subscriptionKey = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx";
// Add your own Face endpoint.
private static string faceEndpoint = "https://cnsoft.cognitiveservices.azure.cn/";
private readonly IFaceClient faceClient = new FaceClient(
new ApiKeyServiceClientCredentials(subscriptionKey),
new System.Net.Http.DelegatingHandler[] { });
// The list of detected faces.
private IList<DetectedFace> faceList;
// The list of descriptions for the detected faces.
private string[] faceDescriptions;
// The resize factor for the displayed image.
private double resizeFactor;
private const string defaultStatusBarText =
"Place the mouse pointer over a face to see the face description.";
// </snippet_mainwindow_fields>
// <snippet_mainwindow_constructor>
public MainWindow()
{
InitializeComponent();
if (Uri.IsWellFormedUriString(faceEndpoint, UriKind.Absolute))
{
faceClient.Endpoint = faceEndpoint;
}
else
{
MessageBox.Show(faceEndpoint,
"Invalid URI", MessageBoxButton.OK, MessageBoxImage.Error);
Environment.Exit(0);
}
}
// </snippet_mainwindow_constructor>
// <snippet_browsebuttonclick_start>
// Displays the image and calls UploadAndDetectFaces.
private async void BrowseButton_Click(object sender, RoutedEventArgs e)
{
// Get the image file to scan from the user.
var openDlg = new Microsoft.Win32.OpenFileDialog();
openDlg.Filter = "JPEG Image(*.jpg)|*.jpg";
bool? result = openDlg.ShowDialog(this);
// Return if canceled.
if (!(bool)result)
{
return;
}
// Display the image file.
string filePath = openDlg.FileName;
Uri fileUri = new Uri(filePath);
BitmapImage bitmapSource = new BitmapImage();
bitmapSource.BeginInit();
bitmapSource.CacheOption = BitmapCacheOption.None;
bitmapSource.UriSource = fileUri;
bitmapSource.EndInit();
FacePhoto.Source = bitmapSource;
// </snippet_browsebuttonclick_start>
// <snippet_browsebuttonclick_mid>
// Detect any faces in the image.
Title = "Detecting...";
faceList = await UploadAndDetectFaces(filePath);
Title = String.Format(
"Detection Finished. {0} face(s) detected", faceList.Count);
if (faceList.Count > 0)
{
// Prepare to draw rectangles around the faces.
DrawingVisual visual = new DrawingVisual();
DrawingContext drawingContext = visual.RenderOpen();
drawingContext.DrawImage(bitmapSource,
new Rect(0, 0, bitmapSource.Width, bitmapSource.Height));
double dpi = bitmapSource.DpiX;
// Some images don't contain dpi info.
resizeFactor = (dpi == 0) ? 1 : 96 / dpi;
faceDescriptions = new String[faceList.Count];
for (int i = 0; i < faceList.Count; ++i)
{
DetectedFace face = faceList[i];
// Draw a rectangle on the face.
drawingContext.DrawRectangle(
Brushes.Transparent,
new Pen(Brushes.Red, 2),
new Rect(
face.FaceRectangle.Left * resizeFactor,
face.FaceRectangle.Top * resizeFactor,
face.FaceRectangle.Width * resizeFactor,
face.FaceRectangle.Height * resizeFactor
)
);
// Store the face description.
faceDescriptions[i] = FaceDescription(face);
}
if (faceList.Count > 0)
{
TheEmotion.Text = "Copy all of the information below:\n" + faceDescriptions[0];
}
drawingContext.Close();
// Display the image with the rectangle around the face.
RenderTargetBitmap faceWithRectBitmap = new RenderTargetBitmap(
(int)(bitmapSource.PixelWidth * resizeFactor),
(int)(bitmapSource.PixelHeight * resizeFactor),
96,
96,
PixelFormats.Pbgra32);
faceWithRectBitmap.Render(visual);
FacePhoto.Source = faceWithRectBitmap;
// Set the status bar text.
faceDescriptionStatusBar.Text = defaultStatusBarText;
}
// </snippet_browsebuttonclick_mid>
// <snippet_browsebuttonclick_end>
}
// </snippet_browsebuttonclick_end>
// <snippet_mousemove_start>
// Displays the face description when the mouse is over a face rectangle.
private void FacePhoto_MouseMove(object sender, MouseEventArgs e)
{
// </snippet_mousemove_start>
// <snippet_mousemove_mid>
// If the REST call has not completed, return.
if (faceList == null)
return;
// Find the mouse position relative to the image.
Point mouseXY = e.GetPosition(FacePhoto);
ImageSource imageSource = FacePhoto.Source;
BitmapSource bitmapSource = (BitmapSource)imageSource;
// Scale adjustment between the actual size and displayed size.
var scale = FacePhoto.ActualWidth / (bitmapSource.PixelWidth / resizeFactor);
// Check if this mouse position is over a face rectangle.
bool mouseOverFace = false;
for (int i = 0; i < faceList.Count; ++i)
{
FaceRectangle fr = faceList[i].FaceRectangle;
double left = fr.Left * scale;
double top = fr.Top * scale;
double width = fr.Width * scale;
double height = fr.Height * scale;
// Display the face description if the mouse is over this face rectangle.
if (mouseXY.X >= left && mouseXY.X <= left + width &&
mouseXY.Y >= top && mouseXY.Y <= top + height)
{
faceDescriptionStatusBar.Text = faceDescriptions[i];
mouseOverFace = true;
break;
}
}
// String to display when the mouse is not over a face rectangle.
if (!mouseOverFace) faceDescriptionStatusBar.Text = defaultStatusBarText;
// </snippet_mousemove_mid>
// <snippet_mousemove_end>
}
// </snippet_mousemove_end>
// <snippet_uploaddetect>
// Uploads the image file and calls DetectWithStreamAsync.
private async Task<IList<DetectedFace>> UploadAndDetectFaces(string imageFilePath)
{
// The list of Face attributes to return.
IList<FaceAttributeType?> faceAttributes =
new FaceAttributeType?[]
{
FaceAttributeType.Gender, FaceAttributeType.Age,
FaceAttributeType.Smile, FaceAttributeType.Emotion,
FaceAttributeType.Glasses, FaceAttributeType.Hair
};
// Call the Face API.
try
{
using (Stream imageFileStream = File.OpenRead(imageFilePath))
{
// The second argument specifies to return the faceId, while
// the third argument specifies not to return face landmarks.
IList<DetectedFace> faceList =
await faceClient.Face.DetectWithStreamAsync(
imageFileStream, true, false, faceAttributes);
return faceList;
}
}
// Catch and display Face API errors.
catch (APIErrorException f)
{
MessageBox.Show(f.Message);
return new List<DetectedFace>();
}
// Catch and display all other errors.
catch (Exception e)
{
MessageBox.Show(e.Message, "Error");
return new List<DetectedFace>();
}
}
// </snippet_uploaddetect>
// <snippet_facedesc>
// Creates a string out of the attributes describing the face.
private string FaceDescription(DetectedFace face)
{
StringBuilder sb = new StringBuilder();
sb.Append("Face: ");
// Add the gender, age, and smile.
sb.Append(face.FaceAttributes.Gender);
sb.Append(", ");
sb.Append(face.FaceAttributes.Age);
sb.Append(", ");
sb.Append(String.Format("smile {0:F1}% ", face.FaceAttributes.Smile * 100));
sb.Append("\n");
// Add all of the emotion scores.
Emotion emotionScores = face.FaceAttributes.Emotion;
sb.Append("anger: "+ emotionScores.Anger + "\n");
sb.Append("contempt: " + emotionScores.Contempt + "\n");
sb.Append("disgust: " + emotionScores.Disgust + "\n");
sb.Append("fear: " + emotionScores.Fear + "\n");
sb.Append("happiness: " + emotionScores.Happiness + "\n");
sb.Append("neutral: " + emotionScores.Neutral + "\n");
sb.Append("sadness: " + emotionScores.Sadness + "\n");
sb.Append("surprise: " + emotionScores.Surprise + "\n");
// Add glasses.
sb.Append("ifhasGlasses: "+face.FaceAttributes.Glasses);
sb.Append("\n ");
// Add hair.
sb.Append("Hair: ");
// Display baldness confidence if over 1%.
if (face.FaceAttributes.Hair.Bald >= 0.01f)
sb.Append(String.Format("bald {0:F1}% ", face.FaceAttributes.Hair.Bald * 100));
// Display all hair color attributes over 10%.
IList<HairColor> hairColors = face.FaceAttributes.Hair.HairColor;
foreach (HairColor hairColor in hairColors)
{
if (hairColor.Confidence >= 0.1f)
{
sb.Append(hairColor.Color.ToString());
sb.Append(String.Format(" {0:F1}% ", hairColor.Confidence * 100));
}
}
// Return the built string.
return sb.ToString();
}
// </snippet_facedesc>
}
}