AudioPolicyService 所在进程
frameworks/av/media/audioserver
➜ audioserver git:(0111) ✗ tree
.
├── Android.mk
├── audioserver.rc
├── main_audioserver.cpp
└── OWNERS
// audioserver entry point: registers the core audio services with the service
// manager in one process, then donates this thread to the binder thread pool.
int main(int argc __unused, char **argv)
{
// Size the HIDL (hwbinder) RPC thread pool used to talk to the audio HAL.
android::hardware::configureRpcThreadpool(4, false /*callerWillJoin*/);
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
// Registration order matters: AudioPolicyService's onFirstRef() creates an
// AudioPolicyClient that presumably talks to AudioFlinger — so AudioFlinger
// is published first (TODO confirm against full AOSP source).
AudioFlinger::instantiate();
ALOGI("ServiceManager: AudioFlinger instantiate done %p", sm.get());
AudioPolicyService::instantiate();
ALOGI("ServiceManager: AudioPolicyService instantiate done %p", sm.get());
// NOTE(review): vendor addition — not present in stock AOSP main_audioserver.
instantiateVRAudioServer();
ALOGI("ServiceManager: VRAudioServer instantiate done %p", sm.get());
ALOGI("ServiceManager: done %p", sm.get());
// Spawn binder worker threads, then block this thread servicing binder calls.
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
AudioPolicyService::instantiate()
就这么一句话包含了整个AudioPolicyService的初始化
在 class AudioPolicyService中没有找到instantiate(),看基类BinderService
// Mixin that lets a binder-native service class register itself with the
// service manager. SERVICE must expose a static getServiceName().
template<typename SERVICE>
class BinderService
{
public:
    // Constructs exactly one SERVICE instance and hands it to servicemanager
    // under SERVICE::getServiceName(); returns the addService() status.
    static status_t publish(bool allowIsolated = false,
                            int dumpFlags = IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT) {
        sp<SERVICE> service = new SERVICE();
        sp<IServiceManager> serviceManager = defaultServiceManager();
        return serviceManager->addService(String16(SERVICE::getServiceName()),
                                          service, allowIsolated, dumpFlags);
    }
    // Fire-and-forget registration: the publish() status is discarded.
    static void instantiate() { publish(); }
};
new SERVICE() -> new AudioPolicyService()
AudioPolicyService()
BinderService::instantiate 调用到 new AudioPolicyService()
触发对象的创建和初次使用AudioPolicyService() and AudioPolicyService::onFirstRef()
// Constructor only zero-initializes members; the real setup (command threads,
// policy manager creation) is deferred to onFirstRef().
AudioPolicyService::AudioPolicyService()
: BnAudioPolicyService(), // base-class (binder native) constructor
mAudioPolicyManager(NULL), // member initialization: created later in onFirstRef()
mAudioPolicyClient(NULL),
mPhoneState(AUDIO_MODE_INVALID),
mCaptureStateNotifier(false) {
}
// RefBase hook: runs once when the first strong pointer (created by
// BinderService::publish) takes ownership. Performs the heavy initialization
// that the constructor deliberately skips.
void AudioPolicyService::onFirstRef()
{
{
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
// start output activity command thread
mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
// Client interface handed to the policy manager so it can call back into us.
mAudioPolicyClient = new AudioPolicyClient(this);
// Creates and initializes the platform AudioPolicyManager (see factory below).
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
}
// load audio processing modules
sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects();
sp<UidPolicy> uidPolicy = new UidPolicy(this);
uidPolicy->registerSelf();
// NOTE(review): in this excerpt audioPolicyEffects/uidPolicy are locals;
// the full AOSP source presumably stores them in members — confirm.
}
AudioCommandThread
AudioCommandThread: 哪里有对user permission的判断?
// Thread used to send audio config commands to audio flinger
// For audio config commands, it is necessary because audio flinger requires that the calling
// process (user) has permission to modify audio settings.
class AudioCommandThread : public Thread {}
AudioPolicyClient
class AudioPolicyClient : public AudioPolicyClientInterface
构造函数很简单
AudioPolicyClient(AudioPolicyService *service) : mAudioPolicyService(service) {}
AudioPolicyManager
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
大部分工作是这个函数完成的,在详细分析该函数之前先看下AudioPolicyInterface 和 AudioPolicyClientInterface
IAudioPolicyService AudioPolicyInterface AudioPolicyClientInterface
这三个接口有些函数比较相似,看上去比较乱。IAudioPolicyService是向外提供的接口,其他两个接口是服务它的, AudioPolicyInterface是平台无关的,或者说它会去调用平台相关的AudioPolicyClientInterface, AudioPolicyClientInterface会调用AudioFlinger service 去执行策略。
// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
// between the platform specific audio policy manager and Android generic audio policy manager.
// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class.
// This implementation makes use of the AudioPolicyClientInterface to control the activity and
// configuration of audio input and output streams.
// The platform specific audio policy manager is in charge of the audio routing and volume control
// policies for a given platform.
// The main roles of this module are:
[1] - keep track of current system state (removable device connections, phone state, user requests...).
// System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface.
[2] - process getOutput() queries received when AudioTrack objects are created: Those queries
// return a handler on an output that has been selected, configured and opened by the audio policy manager and that
// must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method.
// When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide
// to close or reconfigure the output depending on other streams using this output and current system state.
[3] - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs.
[4] - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value
// applicable to each output as a function of platform specific settings and current output route (destination device). It
// also make sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries).
//
AudioPolicyManager (APM)
AudioPolicyManager implements audio policy manager behavior common to all platforms
class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver
{}  // body elided in this excerpt
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
frameworks/av/services/audiopolicy/manager/AudioPolicyFactory.cpp
// Factory entry point (C linkage keeps the symbol unmangled for the loader).
// Returns a fully initialized manager, or nullptr when the second-phase
// initialize() fails — callers never see a half-initialized instance.
extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    AudioPolicyManager *manager = new AudioPolicyManager(clientInterface);
    if (manager->initialize() != NO_ERROR) {
        delete manager;
        return nullptr;
    }
    return manager;
}
// These methods should be used when finer control over APM initialization
// is needed, e.g. in tests. Must be used in conjunction with the constructor
// that only performs fields initialization. The public constructor comprises
// these steps in the following sequence:
// - field initializing constructor;
// - loadConfig;
// - initialize.
createAudioPolicyManager分为两部分:
1. new AudioPolicyManager(AudioPolicyClientInterface)
2. apm->initialize()
new AudioPolicyManager(clientInterface)
这里构造函数有两个:一个参数、两个参数,但最关键的函数还是 loadConfig()
// Public constructor: delegates to the field-initializing constructor, then
// immediately parses the audio policy configuration via loadConfig().
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
: AudioPolicyManager(clientInterface, false /*forTesting*/)
{
loadConfig();
}
// Field-initializing constructor: sets every member to a safe default and does
// no other work. Tests use it directly so they can sequence loadConfig() and
// initialize() themselves; the public constructor adds loadConfig() on top.
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
bool /*forTesting*/)
:
mUidCached(AID_AUDIOSERVER), // no need to call getuid(), there's only one of us running.
mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
// mConfig aggregates the collections that the XML deserializer fills in later.
mConfig(mHwModulesAll, mOutputDevicesAll, mInputDevicesAll, mDefaultOutputDevice),
mAudioPortGeneration(1),
mEnableAudioLoopback(false),
mIsCERegion(false),
mBeaconMuteRefCount(0),
mBeaconPlayingRefCount(0),
mBeaconMuted(false),
mTtsOutputAvailable(false),
mMasterMono(false),
mMusicEffectOutput(AUDIO_IO_HANDLE_NONE)
{
}
loadConfig()
audio_policy_configuration.xml的格式
audio_policy_configuration.xml
<!-- Modules section:
There is one section per audio HW module present on the platform.
Each module section will contain two mandatory tags for audio HAL “halVersion” and “name”.
The module names are the same as in current .conf file:
“primary”, “A2DP”, “remote_submix”, “USB”
Each module will contain the following sections:
“devicePorts”: a list of device descriptors for all input and output devices accessible via this
module.
This contains both permanently attached devices and removable devices.
“mixPorts”: listing all output and input streams exposed by the audio HAL
(HAL 不导出devicePorts?)
“routes”: list of possible connections between input and output devices or between stream and devices.
"route": is defined by an attribute:
-"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix)
-"sink": the sink involved in this route
-"sources": all the sources that can be connected to the sink via this route
“attachedDevices”: permanently attached devices.
The attachedDevices section is a list of devices names. The names correspond to device names
defined in <devicePorts> section.
“defaultOutputDevice”: device to be used by default when no policy rule applies
-->
deserializeAudioPolicyXmlConfig解析配置文件
deserializeAudioPolicyXmlConfig(AudioPolicyConfig &config) ->
deserializeAudioPolicyFile(audioPolicyXmlConfigFile, //"audio_policy_configuration.xml"
&config);
frameworks/av/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
// Parses the given audio policy XML file into |config| using a throwaway
// PolicySerializer; returns the serializer's deserialize() status.
status_t deserializeAudioPolicyFile(const char *fileName, AudioPolicyConfig *config)
{
    return PolicySerializer{}.deserialize(fileName, config);
}
// Common typedefs and helper shared by the per-tag Traits structs below:
// E is the element type (held via sp<>), C the collection storing them.
template<typename E, typename C>
struct AndroidCollectionTraits {
    typedef sp<E> Element;
    typedef C Collection;
    typedef void* PtrSerializingCtx;
    // Appends |element| to |collection|; Android collections signal failure
    // with a negative index, which we map to BAD_VALUE.
    static status_t addElementToCollection(const Element &element, Collection *collection) {
        if (collection->add(element) < 0) {
            return BAD_VALUE;
        }
        return NO_ERROR;
    }
};
// A profile section contains a name, one audio format and the list of supported sampling rates
// and channel masks for this format
// Traits for <profile> elements, collected under a port's <profiles> list.
struct AudioProfileTraits : public AndroidCollectionTraits<AudioProfile, AudioProfileVector>
{
static constexpr const char *tag = "profile";
static constexpr const char *collectionTag = "profiles";
// XML attribute names accepted on a <profile> element.
struct Attributes
{
static constexpr const char *samplingRates = "samplingRates";
static constexpr const char *format = "format";
static constexpr const char *channelMasks = "channelMasks";
};
// Parses one <profile> node into an sp<AudioProfile>.
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
};
// Traits for <mixPort> elements: HAL input/output streams, each deserialized
// into an IOProfile.
struct MixPortTraits : public AndroidCollectionTraits<IOProfile, IOProfileCollection>
{
static constexpr const char *tag = "mixPort";
static constexpr const char *collectionTag = "mixPorts";
// XML attribute names accepted on a <mixPort> element.
struct Attributes
{
static constexpr const char *name = "name";
static constexpr const char *role = "role";
static constexpr const char *roleSource = "source"; /**< <attribute role source value>. */
static constexpr const char *flags = "flags";
static constexpr const char *maxOpenCount = "maxOpenCount";
static constexpr const char *maxActiveCount = "maxActiveCount";
};
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// Children: GainTraits
};
// Traits for <devicePort> elements: devices reachable through this module,
// each deserialized into a DeviceDescriptor.
struct DevicePortTraits : public AndroidCollectionTraits<DeviceDescriptor, DeviceVector>
{
static constexpr const char *tag = "devicePort";
static constexpr const char *collectionTag = "devicePorts";
// XML attribute names accepted on a <devicePort> element.
struct Attributes
{
/** <device tag name>: any string without space. */
static constexpr const char *tagName = "tagName";
static constexpr const char *type = "type"; /**< <device type>. */
static constexpr const char *role = "role"; /**< <device role: sink or source>. */
static constexpr const char *roleSource = "source"; /**< <attribute role source value>. */
/** optional: device address, char string less than 64. */
static constexpr const char *address = "address";
/** optional: the list of encoded audio formats that are known to be supported. */
static constexpr const char *encodedFormats = "encodedFormats";
};
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// Children: GainTraits (optional)
};
// Traits for <route> elements describing source-to-sink connections.
struct RouteTraits : public AndroidCollectionTraits<AudioRoute, AudioRouteVector>
{
static constexpr const char *tag = "route";
static constexpr const char *collectionTag = "routes";
struct Attributes
{
static constexpr const char *type = "type"; /**< <route type>: mix or mux. */
static constexpr const char *typeMix = "mix"; /**< type attribute mix value. */
static constexpr const char *sink = "sink"; /**< <sink: involved in this route>. */
/** sources: all source that can be involved in this route. */
static constexpr const char *sources = "sources";
};
// Overrides the base's void* context: routes are deserialized with the owning
// HwModule as context (ModuleTraits::deserialize passes module.get()).
typedef HwModule *PtrSerializingCtx;
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
};
// Traits for <module> elements (one per audio HW module, e.g. "primary").
struct ModuleTraits : public AndroidCollectionTraits<HwModule, HwModuleCollection>
{
static constexpr const char *tag = "module";
static constexpr const char *collectionTag = "modules";
// Child sections handled inline by deserialize() rather than via a Traits type.
static constexpr const char *childAttachedDevicesTag = "attachedDevices";
static constexpr const char *childAttachedDeviceTag = "item";
static constexpr const char *childDefaultOutputDeviceTag = "defaultOutputDevice";
struct Attributes
{
static constexpr const char *name = "name";
static constexpr const char *version = "halVersion";
};
// Modules are deserialized with the top-level AudioPolicyConfig as context.
typedef AudioPolicyConfig *PtrSerializingCtx;
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// Children: mixPortTraits, devicePortTraits, and routeTraits
// Need to call deserialize on each child
};
每个xxxTraits都有自己的deserialize 函数
// Deserializes one <module> element into a HwModule, then parses its child
// sections. NOTE(review): the numbered "N]:" lines below are the note author's
// annotations marking which XML section each step handles; they are not part
// of the real AOSP source.
Return<ModuleTraits::Element> ModuleTraits::deserialize(const xmlNode *cur, PtrSerializingCtx ctx)
{
1]: <module name="primary" halVersion="2.0">
std::string name = getXmlAttribute(cur, Attributes::name);
uint32_t versionMajor = 0, versionMinor = 0;
std::string versionLiteral = getXmlAttribute(cur, Attributes::version);
if (!versionLiteral.empty()) {
// halVersion is "major.minor"; both parts stay 0 when absent or unparsable.
sscanf(versionLiteral.c_str(), "%u.%u", &versionMajor, &versionMinor);
}
Element module = new HwModule(name.c_str(), versionMajor, versionMinor);
// Deserialize children: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes
2]: <mixPorts>
MixPortTraits::Collection mixPorts;
status_t status = deserializeCollection<MixPortTraits>(cur, &mixPorts, NULL);
if (status != NO_ERROR) {
return Status::fromStatusT(status);
}
module->setProfiles(mixPorts); // class HwModule -> addProfile-> addOut/InputProfile:
// mOut/InputProfiles.add(profile) and mPorts.add(profile)
3]: <devicePorts>
DevicePortTraits::Collection devicePorts;
status = deserializeCollection<DevicePortTraits>(cur, &devicePorts, NULL);
if (status != NO_ERROR) {
return Status::fromStatusT(status);
}
module->setDeclaredDevices(devicePorts); // mDeclaredDevices = devices; mPorts.add(device[i])
4]: <routes>
RouteTraits::Collection routes;
// Routes receive the module itself as context (RouteTraits::PtrSerializingCtx is HwModule*).
status = deserializeCollection<RouteTraits>(cur, &routes, module.get());
if (status != NO_ERROR) {
return Status::fromStatusT(status);
}
module->setRoutes(routes);
5]: attachedDevices
// An attached-device name must match a tagName declared under <devicePorts>.
sp<DeviceDescriptor> device = module->getDeclaredDevices().
getDeviceFromTagName(std::string(reinterpret_cast<const char*>(
attachedDevice.get())));
ctx->addDevice(device); // ctx here is the AudioPolicyConfig
6]: defaultOutputDevice
sp<DeviceDescriptor> device = module->getDeclaredDevices().getDeviceFromTagName(
std::string(reinterpret_cast<const char*>(defaultOutputDevice.get())));
ctx->setDefaultOutputDevice(device);
}