Android Video Communication: A Low-Latency Solution

Background:

  The project requires video communication: the camera picture captured on device A must be forwarded to device B for display.

Deployment:

  APP1: encodes the data captured by the camera and sends it to the server

  APP2: pulls the data from the server, then decodes and displays it

  Server: receives the data submitted by APP1 and forwards it to APP2

Application notes:

  APP1:

camera = Camera.open(Camera.CameraInfo.CAMERA_FACING_FRONT);
Camera.Parameters parameters = camera.getParameters();
parameters.setPreviewFormat(ImageFormat.NV21);
parameters.setPreviewSize(width, height);
// Raise the image brightness via exposure compensation
parameters.setExposureCompensation(parameters.getMaxExposureCompensation() / 2);
camera.setParameters(parameters);
camera.setDisplayOrientation(90);
camera.setPreviewCallback(new Camera.PreviewCallback() {
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        // Grab the frame and record its capture time; the decoder needs this
        // timestamp to keep playback continuous, smooth and artifact-free.
        stamptime = System.nanoTime();
        yuv_data = data;
    }
});
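
The snippet stops short of actually starting the preview; onPreviewFrame does not fire until a preview target is bound and startPreview() is called. A minimal sketch of the missing steps, assuming a dummy SurfaceTexture is acceptable (the original does not show how the preview is bound):

try {
    // Assumption: a dummy SurfaceTexture is enough when the preview is not
    // rendered on screen; bind a TextureView's SurfaceTexture to display it.
    camera.setPreviewTexture(new SurfaceTexture(10));
} catch (IOException e) {
    e.printStackTrace();
}
camera.startPreview(); // onPreviewFrame only starts firing after this call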
import java.io.IOException;
import java.nio.ByteBuffer;

import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;

public class AvcKeyFrameEncoder {
    private final static String TAG = "MediaCodec";
    private int TIMEOUT_USEC = 12000;
    private MediaCodec mediaCodec;
    int m_width;
    int m_height;
    int m_framerate;
    public byte[] configbyte;        // SPS/PPS codec config data, prepended to key frames
    public byte[] yuv_data = null;   // latest preview frame waiting to be encoded
    public long stamptime = 0;       // capture timestamp of yuv_data

    public AvcKeyFrameEncoder(int width, int height, int framerate) {
        m_width = width;
        m_height = height;
        m_framerate = framerate;
        // The encoded output is landscape, because the camera itself captures
        // landscape frames by default. If you rotate the image by 90 or 270
        // degrees, swap width and height here, otherwise the picture is
        // garbled: a 320x240 frame becomes 240x320 after a 90-degree rotation.
        MediaFormat mediaFormat = MediaFormat.createVideoFormat("video/avc", width, height);
        mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar);
        mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, 125000);
        mediaFormat.setInteger(MediaFormat.KEY_FRAME_RATE, framerate);
        mediaFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);
        try {
            mediaCodec = MediaCodec.createEncoderByType("video/avc");
        } catch (IOException e) {
            e.printStackTrace();
        }
        // Configure and start the encoder.
        mediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mediaCodec.start();
    }

    public void StopEncoder() {
        try {
            mediaCodec.stop();
            mediaCodec.release();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public boolean isRuning = false;

    public void StartEncoderThread(final ISaveVideo saveVideo, final ICall callback) {
        isRuning = true;
        new Thread(new Runnable() {
            @Override
            public void run() {
                byte[] input = null;
                long pts = 0;
                while (isRuning) {
                    // Wait until the preview callback has published a frame.
                    if (yuv_data == null) {
                        continue;
                    }
                    // Take the pending frame and its capture timestamp.
                    input = yuv_data;
                    pts = stamptime;
                    yuv_data = null;
                    // The encoder expects COLOR_FormatYUV420SemiPlanar (NV12),
                    // but the camera delivers NV21, so swap the chroma bytes.
                    byte[] yuv420sp = new byte[m_width * m_height * 3 / 2];
                    NV21ToNV12(input, yuv420sp, m_width, m_height);
                    input = yuv420sp;
                    try {
                        // Encoder input and output buffers.
                        ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
                        ByteBuffer[] outputBuffers = mediaCodec.getOutputBuffers();
                        int inputBufferIndex = mediaCodec.dequeueInputBuffer(-1);
                        if (inputBufferIndex >= 0) {
                            ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
                            inputBuffer.clear();
                            // Queue the converted YUV420 frame for encoding.
                            inputBuffer.put(input);
                            mediaCodec.queueInputBuffer(inputBufferIndex, 0, input.length, pts, 0);
                        }
                        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
                        int outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, TIMEOUT_USEC);
                        while (outputBufferIndex >= 0) {
                            // Copy the encoded frame out of the output buffer.
                            ByteBuffer outputBuffer = outputBuffers[outputBufferIndex];
                            byte[] outData = new byte[bufferInfo.size];
                            outputBuffer.get(outData);
                            if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                                // SPS/PPS: keep it so it can be prepended to every key frame.
                                configbyte = outData;
                            } else if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0) {
                                // Prepend SPS/PPS to the key frame so the decoder
                                // can start from any I-frame.
                                byte[] keyframe = new byte[bufferInfo.size + configbyte.length];
                                System.arraycopy(configbyte, 0, keyframe, 0, configbyte.length);
                                System.arraycopy(outData, 0, keyframe, configbyte.length, outData.length);
                                Logs.i("Uploading I-frame " + keyframe.length);
                                // Packet layout: 1-byte type, 8-byte pts, 4-byte length, payload.
                                byte[] send_data = new byte[13 + keyframe.length];
                                System.arraycopy(new byte[]{0x01}, 0, send_data, 0, 1);
                                System.arraycopy(IntBytes.longToBytes(pts), 0, send_data, 1, 8);
                                System.arraycopy(IntBytes.intToByteArray(keyframe.length), 0, send_data, 9, 4);
                                System.arraycopy(keyframe, 0, send_data, 13, keyframe.length);
                                if (saveVideo != null) {
                                    saveVideo.SaveVideoData(send_data);
                                }
                                if (callback != null) {
                                    callback.callback(keyframe, pts);
                                }
                            } else {
                                // P-frame: same packet layout, type 0x02.
                                byte[] send_data = new byte[13 + outData.length];
                                System.arraycopy(new byte[]{0x02}, 0, send_data, 0, 1);
                                System.arraycopy(IntBytes.longToBytes(pts), 0, send_data, 1, 8);
                                System.arraycopy(IntBytes.intToByteArray(outData.length), 0, send_data, 9, 4);
                                System.arraycopy(outData, 0, send_data, 13, outData.length);
                                if (saveVideo != null) {
                                    saveVideo.SaveVideoData(send_data);
                                }
                                if (callback != null) {
                                    callback.callback(outData, pts);
                                }
                            }
                            mediaCodec.releaseOutputBuffer(outputBufferIndex, false);
                            outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, TIMEOUT_USEC);
                        }
                    } catch (Throwable t) {
                        t.printStackTrace();
                        break;
                    }
                }
            }
        }).start();
    }

    // NV21 and NV12 share the same Y plane; only the chroma bytes are
    // interleaved in the opposite order (VU vs UV), so swap each pair.
    // (The original version copied the Y plane twice and indexed the chroma
    // plane off by one at its boundaries; both issues are fixed here.)
    private void NV21ToNV12(byte[] nv21, byte[] nv12, int width, int height) {
        if (nv21 == null || nv12 == null) return;
        int framesize = width * height;
        System.arraycopy(nv21, 0, nv12, 0, framesize);
        for (int j = 0; j < framesize / 2; j += 2) {
            nv12[framesize + j] = nv21[framesize + j + 1];     // U
            nv12[framesize + j + 1] = nv21[framesize + j];     // V
        }
    }
}

The video encoder class (Encoder)

A callback interface is used to hand the captured and encoded data out of the class: a worker thread submits it to the server, or it is decoded and rendered locally so the encode/decode latency can be inspected.
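
The encoder references two small callback interfaces, ISaveVideo and ICall, that the post does not define. Inferred from the call sites (saveVideo.SaveVideoData(send_data) and callback.callback(frame, pts)), a minimal sketch could be:

// Inferred from the call sites in the encoder; the names match the code
// above, but the exact signatures are an assumption.
public interface ISaveVideo {
    void SaveVideoData(byte[] data);       // receives the 13-byte header + payload
}

public interface ICall {
    void callback(byte[] frame, long pts); // receives the raw H.264 frame and its pts
}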

A queue, ArrayBlockingQueue<byte[]> H264Queue = new ArrayBlockingQueue<byte[]>(10), temporarily buffers the data handed over through that interface, so a background worker can decode it or submit it to the server.
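
A minimal sketch of that hand-off, using the encoder's ISaveVideo callback as the producer (the SendToServer helper is hypothetical, not from the original):

import java.util.concurrent.ArrayBlockingQueue;

// Producer: the encoder callback drops each packet into the bounded queue.
final ArrayBlockingQueue<byte[]> H264Queue = new ArrayBlockingQueue<byte[]>(10);

ISaveVideo saveVideo = new ISaveVideo() {
    @Override
    public void SaveVideoData(byte[] data) {
        // offer() drops the frame when the queue is full instead of blocking
        // the encoder thread; for low latency, losing a frame is preferable.
        H264Queue.offer(data);
    }
};

// Consumer: a background thread drains the queue and uploads the packets.
new Thread(new Runnable() {
    @Override
    public void run() {
        while (true) {
            try {
                byte[] packet = H264Queue.take(); // blocks until data arrives
                SendToServer(packet);             // hypothetical upload helper
            } catch (InterruptedException e) {
                break;
            }
        }
    }
}).start();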

  APP2: connects to the server and starts pulling data from an I-frame (the buffered data always starts at the most recent I-frame). The capture timestamp recorded earlier must also be passed along, as the timestamp argument (the fourth parameter) of the MediaCodec object's queueInputBuffer method.
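
Both ends also depend on an IntBytes helper (longToBytes, intToByteArray, bytesToLong) to pack and unpack the packet header; the post only shows its call sites. A plausible big-endian version, assuming both ends agree on the byte order:

import java.nio.ByteBuffer;

// Hypothetical implementation of the IntBytes helper used by both apps;
// the original only shows the call sites.
public final class IntBytes {
    public static byte[] longToBytes(long v) {
        return ByteBuffer.allocate(8).putLong(v).array();
    }

    public static long bytesToLong(byte[] b) {
        return ByteBuffer.wrap(b).getLong();
    }

    public static byte[] intToByteArray(int v) {
        return ByteBuffer.allocate(4).putInt(v).array();
    }
}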

Server: receives APP1's data frame by frame. It starts recording at an I-frame and discards any data that does not begin with one, keeping at most one frame at a time. It reads the stored frame, removes it, and keeps sending to APP2 in a loop.
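
The server code itself is not included in the post. A minimal sketch of the relay rule just described (accept data only from an I-frame onward, buffer at most one frame, overwrite with the newest), keyed off the 0x01 type byte the encoder writes for I-frames:

// Hypothetical relay buffer implementing the rule described above.
// Packet layout (from the encoder): 1-byte type, 8-byte pts, 4-byte length, payload.
public class FrameRelay {
    private byte[] pending;          // at most one buffered frame
    private boolean seenKeyFrame;    // only forward once an I-frame has arrived

    public synchronized void onFrameFromApp1(byte[] packet) {
        if (packet[0] == 0x01) {
            seenKeyFrame = true;     // stream (re)starts at this I-frame
        }
        if (!seenKeyFrame) {
            return;                  // discard data not anchored to an I-frame
        }
        pending = packet;            // overwrite: keep only the newest frame
    }

    // Called in a loop by the sender thread for APP2.
    public synchronized byte[] takeFrame() {
        byte[] out = pending;
        pending = null;              // remove after reading
        return out;
    }
}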

import java.io.IOException;
import java.nio.ByteBuffer;

import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.media.Image;
import android.media.MediaCodec;
import android.media.MediaFormat;
import android.os.Build;
import android.view.Surface;

import androidx.annotation.RequiresApi;

public class VideoDecoder {
    private Thread mDecodeThread;
    private MediaCodec mCodec;
    private boolean mStopFlag = false;
    private int Video_Width = 640;
    private int Video_Height = 480;
    private int FrameRate = 25;
    private Boolean isUsePpsAndSps = false;
    private ReceiveVideoThread runThread = null;

    public VideoDecoder(String ip, int port, byte type, int roomId) {
        runThread = new ReceiveVideoThread(ip, port, type, roomId);
        new Thread(runThread).start();
    }

    public void InitReadData(Surface surface) {
        try {
            // Create a decoder for the given MIME type.
            mCodec = MediaCodec.createDecoderByType("video/avc");
        } catch (IOException e) {
            e.printStackTrace();
        }
        // Initialize the decoder format and set the frame rate.
        final MediaFormat mediaformat = MediaFormat.createVideoFormat("video/avc", Video_Width, Video_Height);
        mediaformat.setInteger(MediaFormat.KEY_FRAME_RATE, FrameRate);
        // https://developer.android.com/reference/android/media/MediaFormat.html#KEY_MAX_INPUT_SIZE
        // configure() parameters:
        //   format  - input data format for a decoder (output format for an encoder)
        //   surface - a Surface on which the decoded output is rendered
        //   crypto  - a crypto object if the media data is encrypted
        //   flags   - pass CONFIGURE_FLAG_ENCODE when configuring an encoder
        mCodec.configure(mediaformat, surface, null, 0);
        startDecodingThread();
    }

    private void startDecodingThread() {
        mCodec.start();
        mDecodeThread = new Thread(new decodeH264Thread());
        mDecodeThread.start();
    }

    @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
    private class decodeH264Thread implements Runnable {
        @Override
        public void run() {
            try {
                decodeLoop_New();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        private void decodeLoop_New() {
            // Input buffers for the compressed data.
            ByteBuffer[] inputBuffers = mCodec.getInputBuffers();
            // Metadata of each decoded buffer: offset, size of valid data, pts, flags.
            MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
            long timeoutUs = 1000;
            byte[] marker0 = new byte[]{0, 0, 0, 1};   // Annex-B start code
            byte[] dummyFrame = new byte[]{0x00, 0x00, 0x01, 0x20};
            byte[] streamBuffer = null;
            while (true) {
                // Pull the next packet from the receive queue, or wait briefly.
                if (runThread.H264Queue.size() > 0) {
                    streamBuffer = runThread.H264Queue.poll();
                } else {
                    try {
                        Thread.sleep(20);
                    } catch (Exception ex) {
                    }
                    continue;
                }
                // Packet layout: 8-byte pts followed by the H.264 data.
                byte[] time_data = new byte[8];
                System.arraycopy(streamBuffer, 0, time_data, 0, 8);
                long pts = IntBytes.bytesToLong(time_data);
                byte[] video_data = new byte[streamBuffer.length - 8];
                System.arraycopy(streamBuffer, 8, video_data, 0, video_data.length);
                streamBuffer = video_data;
                Logs.i("Got streamBuffer " + streamBuffer.length + " pts " + pts);
                int bytes_cnt = 0;
                mStopFlag = false;
                while (mStopFlag == false) {
                    bytes_cnt = streamBuffer.length;
                    if (bytes_cnt == 0) {
                        streamBuffer = dummyFrame;
                    }
                    int startIndex = 0;
                    int remaining = bytes_cnt;
                    while (true) {
                        if (remaining == 0 || startIndex >= remaining) {
                            break;
                        }
                        // Find the next start code; if none, take the rest of the buffer.
                        int nextFrameStart = KMPMatch(marker0, streamBuffer, startIndex + 2, remaining);
                        if (nextFrameStart == -1) {
                            nextFrameStart = remaining;
                        }
                        int inIndex = mCodec.dequeueInputBuffer(timeoutUs);
                        if (inIndex >= 0) {
                            ByteBuffer byteBuffer = inputBuffers[inIndex];
                            byteBuffer.clear();
                            byteBuffer.put(streamBuffer, startIndex, nextFrameStart - startIndex);
                            // Hand the filled input buffer back to the decoder.
                            mCodec.queueInputBuffer(inIndex, 0, nextFrameStart - startIndex, pts, 0);
                            startIndex = nextFrameStart;
                        } else {
                            continue;
                        }
                        int outIndex = mCodec.dequeueOutputBuffer(info, timeoutUs);
                        if (outIndex >= 0) {
                            // Frame pacing based on presentationTimeUs is not possible
                            // here, since the raw H.264 stream carries no usable pts.
                            boolean doRender = (info.size != 0);
                            if (doRender) {
                                // TODO: handle/save the raw decoded frame here,
                                // e.g. send it out through a callback interface.
                                Image image = mCodec.getOutputImage(outIndex);
                                if (image != null) {
                                    byte[] data = getDataFromImage(image, COLOR_FormatNV21);
                                    image.close();
                                }
                            }
                            // Return the buffer to the codec; it is rendered to the
                            // Surface when doRender is true.
                            mCodec.releaseOutputBuffer(outIndex, doRender);
                        }
                    }
                    mStopFlag = true;
                }
            }
        }
    }

    private static final boolean VERBOSE = false;
    private static final long DEFAULT_TIMEOUT_US = 10000;
    private static final int COLOR_FormatI420 = 1;
    private static final int COLOR_FormatNV21 = 2;

    private static boolean isImageFormatSupported(Image image) {
        int format = image.getFormat();
        switch (format) {
            case ImageFormat.YUV_420_888:
            case ImageFormat.NV21:
            case ImageFormat.YV12:
                return true;
        }
        return false;
    }

    @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
    private static byte[] getDataFromImage(Image image, int colorFormat) {
        if (colorFormat != COLOR_FormatI420 && colorFormat != COLOR_FormatNV21) {
            throw new IllegalArgumentException("only support COLOR_FormatI420 and COLOR_FormatNV21");
        }
        if (!isImageFormatSupported(image)) {
            throw new RuntimeException("can't convert Image to byte array, format " + image.getFormat());
        }
        Rect crop = image.getCropRect();
        int format = image.getFormat();
        int width = crop.width();
        int height = crop.height();
        Image.Plane[] planes = image.getPlanes();
        byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
        byte[] rowData = new byte[planes[0].getRowStride()];
        if (VERBOSE) Logs.i("get data from " + planes.length + " planes");
        int channelOffset = 0;
        int outputStride = 1;
        for (int i = 0; i < planes.length; i++) {
            switch (i) {
                case 0:
                    channelOffset = 0;
                    outputStride = 1;
                    break;
                case 1:
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = width * height;
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height + 1;
                        outputStride = 2;
                    }
                    break;
                case 2:
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = (int) (width * height * 1.25);
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height;
                        outputStride = 2;
                    }
                    break;
            }
            ByteBuffer buffer = planes[i].getBuffer();
            int rowStride = planes[i].getRowStride();
            int pixelStride = planes[i].getPixelStride();
            if (VERBOSE) {
                Logs.i("pixelStride " + pixelStride);
                Logs.i("rowStride " + rowStride);
                Logs.i("width " + width);
                Logs.i("height " + height);
                Logs.i("buffer size " + buffer.remaining());
            }
            int shift = (i == 0) ? 0 : 1;
            int w = width >> shift;
            int h = height >> shift;
            buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
            for (int row = 0; row < h; row++) {
                int length;
                if (pixelStride == 1 && outputStride == 1) {
                    // Dense plane: copy the whole row at once.
                    length = w;
                    buffer.get(data, channelOffset, length);
                    channelOffset += length;
                } else {
                    // Strided plane: read the row, then pick every pixelStride-th byte.
                    length = (w - 1) * pixelStride + 1;
                    buffer.get(rowData, 0, length);
                    for (int col = 0; col < w; col++) {
                        data[channelOffset] = rowData[col * pixelStride];
                        channelOffset += outputStride;
                    }
                }
                if (row < h - 1) {
                    buffer.position(buffer.position() + rowStride - length);
                }
            }
            if (VERBOSE) Logs.i("Finished reading data from plane " + i);
        }
        return data;
    }
    private int KMPMatch(byte[] pattern, byte[] bytes, int start, int remain) {
        int[] lsp = computeLspTable(pattern);
        int j = 0; // Number of bytes matched in pattern
        for (int i = start; i < remain; i++) {
            while (j > 0 && bytes[i] != pattern[j]) {
                // Fall back in the pattern
                j = lsp[j - 1]; // Strictly decreasing
            }
            if (bytes[i] == pattern[j]) {
                // Next byte matched, increment position
                j++;
                if (j == pattern.length)
                    return i - (j - 1);
            }
        }
        return -1; // Not found
    }
    private int[] computeLspTable(byte[] pattern) {
        int[] lsp = new int[pattern.length];
        lsp[0] = 0; // Base case
        for (int i = 1; i < pattern.length; i++) {
            // Start by assuming we're extending the previous LSP
            int j = lsp[i - 1];
            while (j > 0 && pattern[i] != pattern[j])
                j = lsp[j - 1];
            if (pattern[i] == pattern[j])
                j++;
            lsp[i] = j;
        }
        return lsp;
    }
    public void StopDecode() {
        if (runThread != null) {
            runThread.StopReceive();
        }
    }
}

The video decoder class (Decoder)

Summary:

  Working through this video pipeline taught me several details of video handling on Android, and deepened my understanding of how these media dependencies are used in a real project.
