Android音频(10)——多App同时录音实现

Android音频(10)——多App同时录音实现

一、使用c++编写录音程序

1. PCM音频数据是原始音频数据,无法使用播放器播放,需要给它加上一个头部,表明声音有几个通道,采样率是多少等等。将PCM音频数据转换为WAV格式,这样其它播放器才能播放出来。

2. 录音时要确定3个参数(1)采样率:一秒钟对声波采样的次数。常用的采样率有8000,11025,22050,32000,44100.高版本的Android应该可以支持更高的采样率。

(2)每个采样值使用多少bit表示 目前Android系统上固定为16bit

(3)声道数Stereo:立体声,每个采样点记录左右声道的值Mono: 单声道

3. tinyplay工具只能播放双声道的音频数据。

4. 测试程序(1)AudioRecordTest.cpp,用于录制pcm数据

#include

#include

#include

using namespace android;

//==============================================

// Audio Record Defination

//==============================================

#ifdef LOG_TAG

#undef LOG_TAG

#endif

#define LOG_TAG "AudioRecordTest"

static pthread_t g_AudioRecordThread;

static pthread_t * g_AudioRecordThreadPtr = NULL;

volatile bool g_bQuitAudioRecordThread = false;

volatile int g_iInSampleTime = 0;

int g_iNotificationPeriodInFrames = 8000/10;

// g_iNotificationPeriodInFrames should be change when sample rate changes.

/*
 * Record loop: opens the default MIC input through the native AudioRecord
 * API and appends raw 16-bit PCM to `fileName` until the global flag
 * g_bQuitAudioRecordThread becomes true.
 *
 *  sample_rate - capture rate in Hz (e.g. 8000, 44100)
 *  channels    - 1 = mono, 2 = stereo
 *  fileName    - path of the output .pcm file (void* so the function can
 *                double as a pthread entry shim)
 *
 * Always returns NULL.
 */
static void * AudioRecordThread(int sample_rate, int channels, void *fileName)
{
    void * inBuffer = NULL;
    audio_source_t inputSource = AUDIO_SOURCE_MIC;
    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
    audio_channel_mask_t channelConfig = AUDIO_CHANNEL_IN_MONO;
    int bufferSizeInBytes;
    int sampleRateInHz = sample_rate; //8000; //44100;
    android::AudioRecord * pAudioRecord = NULL;
    FILE * g_pAudioRecordFile = NULL;
    char * strAudioFile = (char *)fileName;
    int iNbChannels = channels;   // 1 channel for mono, 2 channels for stereo
    int iBytesPerSample = 2;      // 16-bit PCM -> 2 bytes per sample
    int frameSize = 0;            // iNbChannels * iBytesPerSample
    size_t minFrameCount = 0;     // queried from the AudioRecord object
    int iWriteDataCount = 0;      // total bytes written to the file
    android::status_t status;     // declared up here so that no `goto exit`
                                  // jumps over an initialized variable

    // pthread_t is an opaque type; don't print it with %d.
    ALOGD("%s Thread ID = %ld \n", __FUNCTION__, (long)pthread_self());

    g_iInSampleTime = 0;

    g_pAudioRecordFile = fopen(strAudioFile, "wb+");
    if (NULL == g_pAudioRecordFile)
    {
        // BUG FIX: the original never checked fopen() and silently
        // discarded all recorded data when the path was not writable.
        ALOGE("%s open output file failed \n", __FUNCTION__);
        goto exit;
    }

    if (iNbChannels == 2) {
        channelConfig = AUDIO_CHANNEL_IN_STEREO;
    }

    printf("sample_rate = %d, channels = %d, iNbChannels = %d, channelConfig = 0x%x\n", sample_rate, channels, iNbChannels, channelConfig);

    frameSize = iNbChannels * iBytesPerSample;

    status = android::AudioRecord::getMinFrameCount(
            &minFrameCount, sampleRateInHz, audioFormat, channelConfig);
    if (status != android::NO_ERROR)
    {
        ALOGE("%s AudioRecord.getMinFrameCount fail \n", __FUNCTION__);
        goto exit;
    }

    // minFrameCount is size_t: use %zu, not %d.
    ALOGE("sampleRateInHz = %d minFrameCount = %zu iNbChannels = %d channelConfig = 0x%x frameSize = %d ",
          sampleRateInHz, minFrameCount, iNbChannels, channelConfig, frameSize);

    bufferSizeInBytes = minFrameCount * frameSize;
    inBuffer = malloc(bufferSizeInBytes);
    if (inBuffer == NULL)
    {
        ALOGE("%s alloc mem failed \n", __FUNCTION__);
        goto exit;
    }

    // One notification every 1/10 second worth of frames.
    g_iNotificationPeriodInFrames = sampleRateInHz / 10;

    pAudioRecord = new android::AudioRecord();
    if (NULL == pAudioRecord)
    {
        ALOGE(" create native AudioRecord failed! ");
        goto exit;
    }

    pAudioRecord->set(inputSource,
                      sampleRateInHz,
                      audioFormat,
                      channelConfig,
                      0,
                      NULL, //AudioRecordCallback,
                      NULL,
                      0,
                      true,
                      0);

    if (pAudioRecord->initCheck() != android::NO_ERROR)
    {
        ALOGE("AudioTrack initCheck error!");
        goto exit;
    }

    if (pAudioRecord->start() != android::NO_ERROR)
    {
        ALOGE("AudioTrack start error!");
        goto exit;
    }

    // Main capture loop: blocking read from the HAL, then append to file.
    while (!g_bQuitAudioRecordThread)
    {
        int readLen = pAudioRecord->read(inBuffer, bufferSizeInBytes);
        int writeResult = -1;
        if (readLen > 0)
        {
            iWriteDataCount += readLen;
            writeResult = fwrite(inBuffer, 1, readLen, g_pAudioRecordFile);
            if (writeResult < readLen)
            {
                ALOGE("Write Audio Record Stream error");
            }
            //ALOGD("readLen = %d writeResult = %d iWriteDataCount = %d", readLen, writeResult, iWriteDataCount);
        }
        else
        {
            // BUG FIX: read() may also return a negative status code,
            // not only 0 — log the actual value instead of a constant.
            ALOGE("pAudioRecord->read readLen = %d", readLen);
        }
    }

exit:
    if (NULL != g_pAudioRecordFile)
    {
        fflush(g_pAudioRecordFile);
        fclose(g_pAudioRecordFile);
        g_pAudioRecordFile = NULL;
    }

    if (pAudioRecord)
    {
        pAudioRecord->stop();
        // NOTE(review): AudioRecord derives from RefBase on Android, so a
        // plain `delete` is unsafe here; the object is intentionally left
        // alive (the process exits right afterwards). Holding it in an
        // sp<AudioRecord> would release it correctly — confirm against
        // the target platform version.
    }

    if (inBuffer)
    {
        free(inBuffer);
        inBuffer = NULL;
    }

    ALOGD("%s Thread ID = %ld quit\n", __FUNCTION__, (long)pthread_self());
    return NULL;
}

// Entry point: ./AudioRecordTest <sample_rate> <channels> <out.pcm>
// Records until the process is killed (g_bQuitAudioRecordThread is never
// set by this demo).
int main(int argc, char **argv)
{
    if (argc != 4)
    {
        printf("Usage:\n");
        // BUG FIX: the argument list was stripped from the original usage
        // string (HTML extraction ate the <...> placeholders).
        printf("%s <sample_rate> <channels(1|2)> <out_pcm_file>\n", argv[0]);
        return -1;
    }

    // strtol with base 0 accepts decimal, octal (0...) and hex (0x...).
    AudioRecordThread(strtol(argv[1], NULL, 0), strtol(argv[2], NULL, 0), argv[3]);

    return 0;
}

View Code

(2)pcm2wav.cpp,用于将pcm转换为wav格式

// NOTE(review): header names were stripped from the original listing; these
// are what the code below needs (fopen/fseek, strtol, memcpy) — confirm
// against the original blog post.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>   /* exact-width types for the WAV header fields */

/* https://blog.csdn.net/u010011236/article/details/53026127 */

/**
 * Convert PCM16LE raw data to WAVE format.
 *
 * @param pcmpath       Input PCM file (raw 16-bit little-endian samples).
 * @param sample_rate   Sample rate of the PCM data in Hz.
 * @param channels      Channel count of the PCM data (1 or 2).
 * @param wavepath      Output WAVE file path.
 * @return 0 on success, -1 when either file cannot be opened.
 */
int simplest_pcm16le_to_wave(const char *pcmpath, int sample_rate, int channels, const char *wavepath)
{
    /*
     * RIFF/WAVE chunk layouts. BUG FIX: the original used `unsigned long`,
     * which is 8 bytes on LP64 targets (64-bit Linux/Android) and therefore
     * wrote a corrupt header; the format requires exact 4- and 2-byte
     * little-endian fields.
     */
    typedef struct WAVE_HEADER {
        char     fccID[4];    // "RIFF"
        uint32_t dwSize;      // file size - 8; filled in after the data pass
        char     fccType[4];  // "WAVE"
    } WAVE_HEADER;

    typedef struct WAVE_FMT {
        char     fccID[4];          // "fmt "
        uint32_t dwSize;            // size of the fmt payload: always 16 for PCM
        uint16_t wFormatTag;        // 1 = PCM
        uint16_t wChannels;         // 1 = mono, 2 = stereo
        uint32_t dwSamplesPerSec;   // sample rate
        uint32_t dwAvgBytesPerSec;  // == dwSamplesPerSec*wChannels*uiBitsPerSample/8
        uint16_t wBlockAlign;       // == wChannels*uiBitsPerSample/8
        uint16_t uiBitsPerSample;   // bits per sample: 8 or 16
    } WAVE_FMT;

    typedef struct WAVE_DATA {
        char     fccID[4];   // "data"
        uint32_t dwSize;     // == NumSamples*wChannels*uiBitsPerSample/8
    } WAVE_DATA;

    // All fields are naturally aligned, so no padding may appear; lock the
    // on-disk layout in at compile time.
    static_assert(sizeof(WAVE_HEADER) == 12, "WAVE_HEADER must be 12 bytes");
    static_assert(sizeof(WAVE_FMT) == 24, "WAVE_FMT must be 24 bytes");
    static_assert(sizeof(WAVE_DATA) == 8, "WAVE_DATA must be 8 bytes");

    int bits = 16;                 // this converter only handles 16-bit PCM
    WAVE_HEADER pcmHEADER;
    WAVE_FMT pcmFMT;
    WAVE_DATA pcmDATA;
    unsigned short m_pcmData;      // one 16-bit sample at a time
    FILE *fp, *fpout;

    fp = fopen(pcmpath, "rb");     // input is only read; no need for "rb+"
    if (fp == NULL)
    {
        printf("Open pcm file error.\n");
        return -1;
    }

    fpout = fopen(wavepath, "wb+");
    if (fpout == NULL)
    {
        printf("Create wav file error.\n");
        fclose(fp);                // BUG FIX: the original leaked fp here
        return -1;
    }

    /* WAVE_HEADER: dwSize is unknown until the data pass, so reserve the
     * space now and come back to write it at the end. */
    memcpy(pcmHEADER.fccID, "RIFF", 4);
    memcpy(pcmHEADER.fccType, "WAVE", 4);
    fseek(fpout, sizeof(WAVE_HEADER), SEEK_CUR);

    /* WAVE_FMT */
    memcpy(pcmFMT.fccID, "fmt ", 4);
    pcmFMT.dwSize = 16;
    pcmFMT.wFormatTag = 1;                     // PCM
    pcmFMT.wChannels = channels;
    pcmFMT.dwSamplesPerSec = sample_rate;
    pcmFMT.uiBitsPerSample = bits;
    pcmFMT.dwAvgBytesPerSec = pcmFMT.dwSamplesPerSec * pcmFMT.wChannels * pcmFMT.uiBitsPerSample / 8;
    pcmFMT.wBlockAlign = pcmFMT.wChannels * pcmFMT.uiBitsPerSample / 8;
    fwrite(&pcmFMT, sizeof(WAVE_FMT), 1, fpout);

    /* WAVE_DATA: reserve the chunk header, then copy samples while
     * counting payload bytes. */
    memcpy(pcmDATA.fccID, "data", 4);
    pcmDATA.dwSize = 0;
    fseek(fpout, sizeof(WAVE_DATA), SEEK_CUR);

    // Read-before-test so feof() only fires after a short read.
    fread(&m_pcmData, sizeof(unsigned short), 1, fp);
    while (!feof(fp))
    {
        pcmDATA.dwSize += 2;
        fwrite(&m_pcmData, sizeof(unsigned short), 1, fpout);
        fread(&m_pcmData, sizeof(unsigned short), 1, fp);
    }

    /* RIFF chunk size excludes "RIFF" and the size field itself:
     * 4 ("WAVE") + 24 (fmt chunk) + 8 (data chunk header) + payload. */
    pcmHEADER.dwSize = 36 + pcmDATA.dwSize;

    /* Back-patch the two deferred chunk headers. */
    rewind(fpout);
    fwrite(&pcmHEADER, sizeof(WAVE_HEADER), 1, fpout);
    fseek(fpout, sizeof(WAVE_FMT), SEEK_CUR);
    fwrite(&pcmDATA, sizeof(WAVE_DATA), 1, fpout);

    fclose(fp);
    fclose(fpout);

    return 0;
}

// Entry point: ./pcm2wav <in.pcm> <sample_rate> <channels> <out.wav>
int main(int argc, char **argv)
{
    if (argc != 5)
    {
        printf("Usage:\n");
        // BUG FIX: the argument list was stripped from the original usage
        // string (HTML extraction ate the <...> placeholders). The order
        // matches simplest_pcm16le_to_wave(): pcm, rate, channels, wav.
        printf("%s <in.pcm> <sample_rate> <channels(1|2)> <out.wav>\n", argv[0]);
        return -1;
    }

    // strtol with base 0 accepts decimal, octal (0...) and hex (0x...).
    simplest_pcm16le_to_wave(argv[1], strtol(argv[2], NULL, 0), strtol(argv[3], NULL, 0), argv[4]);

    return 0;
}

View Code

(3)Android.mk

# Build rules for the two native test tools described in this article:
#   audio_record_test - captures PCM from the MIC via native AudioRecord
#   pcm2wav           - wraps a raw PCM file in a WAV header
LOCAL_PATH:= $(call my-dir)

# ---- audio_record_test ----------------------------------------------------
include $(CLEAR_VARS)

LOCAL_SRC_FILES:= \
AudioRecordTest.cpp

# libmedia provides android::AudioRecord; libutils/libcutils provide the
# RefBase/logging support it depends on.
LOCAL_SHARED_LIBRARIES := \
libcutils \
libutils \
libmedia

LOCAL_MODULE:= audio_record_test

# Tagged "tests" so it is only included in test/eng builds.
LOCAL_MODULE_TAGS := tests

include $(BUILD_EXECUTABLE)

# ---- pcm2wav --------------------------------------------------------------
include $(CLEAR_VARS)

LOCAL_SRC_FILES:= \
pcm2wav.cpp

# NOTE(review): pcm2wav only uses the C standard library; these shared libs
# appear to be copied from the block above and are likely unnecessary.
LOCAL_SHARED_LIBRARIES := \
libcutils \
libutils \
libmedia

LOCAL_MODULE:= pcm2wav

LOCAL_MODULE_TAGS := tests

include $(BUILD_EXECUTABLE)

View Code

然后使用tinyplay播放产生的wav文件。

录音程序参考:Android Native C++ 层中使用AudioRecord录制PCM音频: https://blog.csdn.net/romantic_energy/article/details/50521970

pcm转wav参考:PCM、WAV格式介绍及用C语言实现PCM转WAV: https://blog.csdn.net/u010011236/article/details/53026127

4. 耳机的只有一边播放有声音的原因

./AudioRecordTest 44100 2 my.pcm → ./pcm2wav my.pcm 44100 2 my.wav → tinyplay my.wav,只有1个耳朵能听到声音

./AudioRecordTest 44100 1 my.pcm → ./pcm2wav my.pcm 44100 1 my.wav。tinyplay不能播放单声道声音,改用其他播放器来播放my.wav,2个耳朵都能听到声音

为何录音时用双声通,播放时只有1个耳朵有声音?反而录音时用单声通,播放时2个耳朵都有声音?

答案:a. 硬件上、驱动上是双声道的; 但是我们只接了一个MIC,所以驱动程序录音时得到的双声道数据中,其中一个声道数据恒为0b. AudioRecordTest录音时如果指定了双声道,那么得到的PCM数据里其中一个声道恒为0,它播放时就会导致只有一个耳朵有声音c. AudioRecordTest录音时如果指定了单声道,那么得到的PCM数据只含有一个声道数据,它是硬件左、右声道的混合,这个混合是AudioFlinger系统实现的.在播放时单声道数据时, AudioFlinger系统会把单声道数据既发给硬件Left DAC(左声道)、也发给硬件Right DAC(右声道),所以2个耳朵都可以听到声音

二、录音框架及代码流程

1. playbackThread 就是MixerThread,多个App对应着一个线程。

2. 原生的Android录音流程根据App传入的声音来源找到对应的device找到profile(audio_policy.conf产生的)根据profile找到module,即对应一个声卡,然后加载对应声卡的HAL文件调用HAL文件中的openInput()来打开一个输入通道。

3. 录音时只要App执行了set(),就会创建一个RecordThread(),多个App可能导致并发访问声卡,导致竞争访问声卡数据的问题。

4. 录音框架及代码流程a. APP创建、设置AudioRecord, 指定了声音来源: inputSource, 比如: AUDIO_SOURCE_MIC,还指定了采样率、通道数、格式等参数b. AudioPolicyManager根据inputSource等参数确定录音设备: devicec. AudioFlinger创建一个RecordThread, 以后该线程将从上述device读取声音数据d. 在RecordThread内部为APP的AudioRecord创建一个对应的RecordTrack,APP的AudioRecord 与 RecordThread内部的RecordTrack 通过共享内存传递数据e. RecordThread从HAL中得到数据, 再通过内部的RecordTrack把数据传给APP的AudioRecord

注意: 在原生代码中,APP的一个AudioRecord会导致创建一个RecordThread,在一个device上有可能存在多个RecordThread,任意时刻只能有一个RecordThread在运行,所以只能有一个APP在录音,不能多个APP同时录音

三、修改代码支持多APP同时录音

修改AudioPolicyManager.cpp,补丁如下:

Subject: [PATCH] v2, support Multi AudioRecord at same time

---

AudioPolicyManager.cpp | 11 +++++++++++

1 file changed, 11 insertions(+)

diff --git a/AudioPolicyManager.cpp b/AudioPolicyManager.cpp

index 536987a..6c87508 100644

--- a/AudioPolicyManager.cpp

+++ b/AudioPolicyManager.cpp

@@ -1356,6 +1356,17 @@ audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource,

config.channel_mask = channelMask;

config.format = format;

+ /* check wether have an AudioInputDescriptor use the same profile */

+ for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {

+ sp&lt;AudioInputDescriptor&gt; desc;

+ desc = mInputs.valueAt(input_index);

+ if (desc->mProfile == profile) {

+ desc->mOpenRefCount++; // increment the open reference count (引用计数加1)

+ desc->mSessions.add(session); // session

+ return desc->mIoHandle;

+ }

+ }

+

status_t status = mpClientInterface->openInput(profile->mModule->mHandle,

&input,

&config,

--

1.9.1

View Code

🖌️ 相关文章

海岛奇兵 新手野人玩法指南 野人怎么玩
365网站打不开了

海岛奇兵 新手野人玩法指南 野人怎么玩

📅 07-09 👁️ 7101
2026世界杯中国队有希望出线?足球振兴的秘密与挑战
365网站打不开了

2026世界杯中国队有希望出线?足球振兴的秘密与挑战

📅 07-08 👁️ 4138