People say the nastiest part of iOS development is streaming media, and the nastiest part of that is real-time voice.
So let's set real-time voice aside for now and look at how audio capture and playback are implemented on iOS.
To implement recording and playback on an iOS device, Apple offers a simple route: AVAudioRecorder and AVAudioPlayer, and most search results recommend exactly that. But this approach is quite limited. With AVAudioRecorder you must first set a file path, and the captured audio is written straight to that file; playback likewise starts from a file path, and the audio can only start playing once the whole file has been loaded. That is very inflexible.
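For comparison, the file-based flow looks roughly like this (a minimal sketch of my own; the path and settings dictionary are illustrative, not from the original post):

```objc
// Minimal AVAudioRecorder/AVAudioPlayer sketch: everything goes through a file.
#import <AVFoundation/AVFoundation.h>

NSURL *url = [NSURL fileURLWithPath:
    [NSTemporaryDirectory() stringByAppendingPathComponent:@"rec.caf"]];
NSDictionary *settings = @{ AVFormatIDKey:         @(kAudioFormatLinearPCM),
                            AVSampleRateKey:       @16000.0,
                            AVNumberOfChannelsKey: @1 };
NSError *error = nil;
AVAudioRecorder *recorder = [[AVAudioRecorder alloc] initWithURL:url
                                                        settings:settings
                                                           error:&error];
[recorder record];   // audio is written straight to the file at url
// ... later ...
[recorder stop];
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithContentsOfURL:url
                                                               error:&error];
[player play];       // playback only starts once the file is complete
```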
My approach instead uses an audio queue (AudioQueue): captured audio is staged in a memory buffer, and playback pulls the audio data back out of that buffer.
Audio capture:
The AudioQueue framework processes audio data as a queue: you allocate buffer space for the queue, and a callback function does the work of reading and writing audio data against those buffers.
A recording audio queue consists of a buffer queue made up of individual buffers, plus a callback. The main steps are:
- Configure the audio format parameters
- Create and start the recording audio queue
- In the callback, handle each captured buffer; here the data is staged in a byte array for the playback side to consume
```objc
// Record.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <CoreAudio/CoreAudioTypes.h>
#import "AudioConstant.h"

// use Audio Queue

typedef struct AQCallbackStruct
{
    AudioStreamBasicDescription mDataFormat;
    AudioQueueRef               queue;
    AudioQueueBufferRef         mBuffers[kNumberBuffers];
    AudioFileID                 outputFile;
    unsigned long               frameSize;
    long long                   recPtr;
    int                         run;
} AQCallbackStruct;

@interface Record : NSObject
{
    AQCallbackStruct aqc;
    AudioFileTypeID  fileFormat;
    long             audioDataLength;
    Byte             audioByte[999999];  // fixed-size staging buffer (see the PS at the end)
    long             audioDataIndex;
}

- (id)init;
- (void)start;
- (void)stop;
- (void)pause;
- (Byte *)getBytes;
- (void)processAudioBuffer:(AudioQueueBufferRef)buffer withQueue:(AudioQueueRef)queue;

@property (nonatomic, assign) AQCallbackStruct aqc;
@property (nonatomic, assign) long audioDataLength;

@end
```
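Both Record.h and Play.h import AudioConstant.h, which the post never shows. Here is one plausible reconstruction, consistent with the 16-bit mono linear PCM settings used below; every value in it is my assumption, not the author's:

```objc
// AudioConstant.h -- hypothetical reconstruction; the original file is not shown.
#ifndef AudioConstant_h
#define AudioConstant_h

#define kSamplingRate     16000           // 16 kHz sample rate (assumed)
#define kNumberChannels   1               // mono
#define kBitsPerChannels  16              // 16-bit signed samples
#define kBytesPerFrame    2               // 1 channel * 16 bits = 2 bytes
#define kFrameSize        (kSamplingRate * kBytesPerFrame / 10)  // ~100 ms per buffer (assumed)
#define kNumberBuffers    3               // buffers in the recording queue

#define QUEUE_BUFFER_SIZE 3               // buffers in the playback queue
#define EVERY_READ_LENGTH kFrameSize      // bytes consumed per playback callback (assumed)

#endif
```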
```objc
// Record.mm
#import "Record.h"

@implementation Record

@synthesize aqc;
@synthesize audioDataLength;

// Input callback: invoked by the queue each time a buffer has been filled
// with captured audio. Note the packet-count parameter is UInt32, matching
// the AudioQueueInputCallback signature (the original used unsigned long).
static void AQInputCallback (void                               *inUserData,
                             AudioQueueRef                      inAudioQueue,
                             AudioQueueBufferRef                inBuffer,
                             const AudioTimeStamp               *inStartTime,
                             UInt32                             inNumPackets,
                             const AudioStreamPacketDescription *inPacketDesc)
{
    Record *engine = (__bridge Record *)inUserData;
    if (inNumPackets > 0)
    {
        [engine processAudioBuffer:inBuffer withQueue:inAudioQueue];
    }
    if (engine.aqc.run)
    {
        // Hand the buffer back to the queue so it can be filled again.
        AudioQueueEnqueueBuffer(engine.aqc.queue, inBuffer, 0, NULL);
    }
}

- (id)init
{
    self = [super init];
    if (self)
    {
        aqc.mDataFormat.mSampleRate       = kSamplingRate;
        aqc.mDataFormat.mFormatID         = kAudioFormatLinearPCM;
        aqc.mDataFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        aqc.mDataFormat.mFramesPerPacket  = 1;
        aqc.mDataFormat.mChannelsPerFrame = kNumberChannels;
        aqc.mDataFormat.mBitsPerChannel   = kBitsPerChannels;
        aqc.mDataFormat.mBytesPerPacket   = kBytesPerFrame;
        aqc.mDataFormat.mBytesPerFrame    = kBytesPerFrame;
        aqc.frameSize                     = kFrameSize;

        AudioQueueNewInput(&aqc.mDataFormat, AQInputCallback, (__bridge void *)(self),
                           NULL, kCFRunLoopCommonModes, 0, &aqc.queue);
        for (int i = 0; i < kNumberBuffers; i++)
        {
            AudioQueueAllocateBuffer(aqc.queue, aqc.frameSize, &aqc.mBuffers[i]);
            AudioQueueEnqueueBuffer(aqc.queue, aqc.mBuffers[i], 0, NULL);
        }
        aqc.recPtr = 0;
        aqc.run = 1;
    }
    audioDataIndex = 0;
    return self;
}

- (void)dealloc
{
    AudioQueueStop(aqc.queue, true);
    aqc.run = 0;
    AudioQueueDispose(aqc.queue, true);
}

- (void)start
{
    AudioQueueStart(aqc.queue, NULL);
}

- (void)stop
{
    AudioQueueStop(aqc.queue, true);
}

- (void)pause
{
    AudioQueuePause(aqc.queue);
}

- (Byte *)getBytes
{
    return audioByte;
}

- (void)processAudioBuffer:(AudioQueueBufferRef)buffer withQueue:(AudioQueueRef)queue
{
    NSLog(@"processAudioData: %u", (unsigned int)buffer->mAudioDataByteSize);
    // Append the captured data to the staging byte array. memcpy is C/C++,
    // hence the .mm (Objective-C++) suffix on this file; same for Play.mm.
    memcpy(audioByte + audioDataIndex, buffer->mAudioData, buffer->mAudioDataByteSize);
    audioDataIndex += buffer->mAudioDataByteSize;
    audioDataLength = audioDataIndex;
}

@end
```
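A minimal usage sketch of my own (not from the post): on iOS you also need an active AVAudioSession that permits recording before starting the queue.

```objc
// Hypothetical usage -- not part of the original post.
#import <AVFoundation/AVFoundation.h>
#import "Record.h"

AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
[session setActive:YES error:nil];

Record *record = [[Record alloc] init];
[record start];
// ... capture for a while ...
[record stop];

// The captured PCM now sits in the staging byte array:
Byte *pcm = [record getBytes];
long  len = record.audioDataLength;
```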
Audio playback:
As with capture, the main steps for playback are:
- Configure the audio format parameters (they must match those used for capture)
- Fetch the buffered audio data
- Create and start the playback audio queue
- In the callback, refill each drained buffer with the next chunk of data
```objc
// Play.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import "AudioConstant.h"

@interface Play : NSObject
{
    // Audio format parameters
    AudioStreamBasicDescription audioDescription;
    // Playback audio queue
    AudioQueueRef audioQueue;
    // Audio buffers
    AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE];
}

- (void)Play:(Byte *)audioByte Length:(long)len;

@end
```
```objc
// Play.mm
#import "Play.h"

@interface Play ()
{
    Byte *audioByte;
    long  audioDataIndex;
    long  audioDataLength;
}
// Declared here so the C callback below can call it.
- (void)FillBuffer:(AudioQueueRef)queue queueBuffer:(AudioQueueBufferRef)buffer;
@end

@implementation Play

// Callback: the queue hands back a drained buffer; refill and re-enqueue it.
static void BufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef buffer)
{
    NSLog(@"processAudioData: %u", (unsigned int)buffer->mAudioDataByteSize);
    Play *player = (__bridge Play *)inUserData;
    [player FillBuffer:inAQ queueBuffer:buffer];
}

// Copy the next chunk of buffered audio into a queue buffer and enqueue it.
- (void)FillBuffer:(AudioQueueRef)queue queueBuffer:(AudioQueueBufferRef)buffer
{
    if (audioDataIndex + EVERY_READ_LENGTH < audioDataLength)
    {
        memcpy(buffer->mAudioData, audioByte + audioDataIndex, EVERY_READ_LENGTH);
        audioDataIndex += EVERY_READ_LENGTH;
        buffer->mAudioDataByteSize = EVERY_READ_LENGTH;
        AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
    }
}

- (void)SetAudioFormat
{
    // Audio format parameters; these must match the capture side.
    audioDescription.mSampleRate       = kSamplingRate;
    audioDescription.mFormatID         = kAudioFormatLinearPCM;
    audioDescription.mFormatFlags      = kAudioFormatFlagIsSignedInteger; // |kAudioFormatFlagIsNonInterleaved;
    audioDescription.mChannelsPerFrame = kNumberChannels;
    audioDescription.mFramesPerPacket  = 1;                // one frame per packet
    audioDescription.mBitsPerChannel   = kBitsPerChannels; // 16-bit samples
    audioDescription.mBytesPerFrame    = kBytesPerFrame;
    audioDescription.mBytesPerPacket   = kBytesPerFrame;
    [self CreateAudioQueue];
}

- (void)CreateAudioQueue
{
    [self Cleanup];
    // Passing NULL for the run loop lets the queue play on its own internal thread.
    AudioQueueNewOutput(&audioDescription, BufferCallback, (__bridge void *)(self),
                        NULL, NULL, 0, &audioQueue);
    if (audioQueue)
    {
        // Allocate the buffers. Each buffer must be at least as large as the
        // biggest chunk ever written into it (here EVERY_READ_LENGTH bytes).
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++)
        {
            OSStatus result = AudioQueueAllocateBuffer(audioQueue, EVERY_READ_LENGTH, &audioQueueBuffers[i]);
            NSLog(@"AudioQueueAllocateBuffer i = %d, result = %d", i, (int)result);
        }
    }
}

- (void)Cleanup
{
    if (audioQueue)
    {
        NSLog(@"Release AudioQueueNewOutput");
        [self Stop];
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++)
        {
            AudioQueueFreeBuffer(audioQueue, audioQueueBuffers[i]);
            audioQueueBuffers[i] = NULL;
        }
        audioQueue = NULL;
    }
}

- (void)Stop
{
    NSLog(@"Audio Player Stop");
    AudioQueueFlush(audioQueue);
    AudioQueueReset(audioQueue);
    AudioQueueStop(audioQueue, TRUE);
}

- (void)Play:(Byte *)byte Length:(long)len
{
    [self Stop];
    audioByte = byte;
    audioDataLength = len;
    NSLog(@"Audio Play Start >>>>>");
    [self SetAudioFormat];
    AudioQueueReset(audioQueue);
    audioDataIndex = 0;
    // Prime every buffer before starting the queue.
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++)
    {
        [self FillBuffer:audioQueue queueBuffer:audioQueueBuffers[i]];
    }
    AudioQueueStart(audioQueue, NULL);
}

@end
```
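Putting the two halves together, a round trip looks roughly like this (a hypothetical snippet, not from the original post):

```objc
// Hypothetical round-trip: capture into memory, then play it back.
Record *record = [[Record alloc] init];
[record start];
// ... let it capture for a few seconds ...
[record stop];

Play *play = [[Play alloc] init];
[play Play:[record getBytes] Length:record.audioDataLength];
```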
The above implements audio capture and playback through an in-memory buffer, covering the main flows: capture, pause, stop, and playback.
PS: My skills are limited and material on this topic is scarce, so I have only verified the happy path; there is no error handling yet. The capture buffer's fixed size also limits each recording to roughly ten seconds or so. I leave optimizing that to anyone who needs it.
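For anyone adding the missing error handling, a common pattern is to check the OSStatus returned by every AudioQueue call (a generic sketch of my own, not from the post):

```objc
// Illustrative OSStatus check helper.
static void CheckStatus(OSStatus status, NSString *operation)
{
    if (status != noErr)
    {
        NSLog(@"%@ failed with OSStatus %d", operation, (int)status);
    }
}
// e.g. CheckStatus(AudioQueueStart(aqc.queue, NULL), @"AudioQueueStart");
```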