Android P之Camera HAL3流程分析

参考文献

[Android O] Camera 服务启动流程简析
[Android O] HAL3 之 Open Camera2 流程(零)—— 概览
[Android O] HAL3 之 Open Camera2 流程(一)—— 从 App 到 CameraService
[Android O] HAL3 之 Open Camera2 流程(二)—— 从 CameraService 到 HAL Service
[Android O] HAL3 之 Open Camera2 流程(三,完结)—— 从 HAL Service 到 Camera HAL

 

android6.0源码分析之Camera API2.0简介
android6.0源码分析之Camera2 HAL分析
android6.0源码分析之Camera API2.0下的初始化流程分析
android6.0源码分析之Camera API2.0下的Preview(预览)流程分析
android6.0源码分析之Camera API2.0下的Capture流程分析
android6.0源码分析之Camera API2.0下的video流程分析
 

Camera API2.0的应用

Android Camera:总结

Android camera子系统HAL层介绍集锦

高通平台

Camera2 数据流从framework到Hal源码分析

mm-camera层frame数据流源码分析

在 Android O 中,系统启动时,就会启动 CameraProvider 服务。它将 Camera HAL 从 cameraserver 进程中分离出来,作为一个独立进程 [email protected] 来控制 HAL。这两个进程之间通过 HIDL 机制进行通信。

这样的改动源自于 Android O 版本加入的 Treble 机制,它的主要功能(如下图所示)是将 service 与 HAL 隔离,以方便 HAL 部分进行独立升级。这其实和 APP 与 Framework 之间的 Binder 机制类似,通过引入一个进程间通信机制而针对不同层级进行解耦(从 Local call 变成了 Remote call)。

Android P之Camera HAL3流程分析

注册CameraProvider服务

创建服务端的 CameraProviderImpl 对象,后续client端会调用
hardware/interfaces/camera/provider/2.4/default/[email protected]_64.rc
# init script: launches the camera provider HAL as a standalone vendor process,
# separate from cameraserver (Treble); the two talk over HIDL.
service vendor.camera-provider-2-4 /vendor/bin/hw/[email protected]_64
    class hal
    user cameraserver
    group audio camera input drmrpc
    ioprio rt 4
    capabilities SYS_NICE
    writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks
hardware/interfaces/camera/provider/2.4/default/service.cpp
// Entry point of the [email protected] process.
int main()
{
    // Talk to hwservicemanager over the vendor binder domain (/dev/vndbinder).
    android::ProcessState::initWithDriver("/dev/vndbinder");
    // Register the passthrough ICameraProvider instance named "legacy/0".
    return defaultPassthroughServiceImplementation<ICameraProvider>("legacy/0", /*maxThreads*/ 6);
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/depend/Android.mk
LOCAL_MODULE := [email protected]

alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/depend/instance.cpp
// Passthrough-HAL entry symbol: resolved by the HIDL runtime to fetch the provider.
ICameraProvider*
HIDL_FETCH_ICameraProvider(const char* name){
    return createICameraProvider_V2_4(name, getCameraDeviceManager());
}
// Creates the server-side CameraProviderImpl object; clients call into it later.
ICameraProvider*
createICameraProvider_V2_4(const char* providerName, NSCam::ICameraDeviceManager* manager){
    auto provider = new CameraProviderImpl(providerName, manager);
    provider->initialize();
    return provider;
}

获得CameraDeviceManager对象
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/depend/instance.cpp
// Returns the process-wide CameraDeviceManagerImpl singleton.
NSCam::ICameraDeviceManager*
getCameraDeviceManager(){
    static NSCam::CameraDeviceManagerImpl singleton(getProviderType().c_str());
    // Function-local static: initialize() runs exactly once, on first call.
    static bool init = singleton.initialize();
    return &singleton;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/CameraDeviceManagerBase.cpp
CameraDeviceManagerBase::
initialize(){
    // Helper: dlopen the device library and resolve the factory symbol from it.
    auto loadDeviceFactory = [](char const* libname, char const* symbol) {
        VirtEnumDeviceFactory item;
        item.mLibHandle = ::dlopen(libname, RTLD_NOW);
        *(void **)(&item.mCreateVirtualCameraDevice) = ::dlsym(item.mLibHandle, symbol);
        return item;
    };
    // Load the createVirtualCameraDevice implementation from the shared library
    // and store it in mCreateVirtualCameraDevice (key 3 = HAL3 devices).
    mVirtEnumDeviceFactoryMap[3] = loadDeviceFactory("libmtkcam_device3.so", "createVirtualCameraDevice");
    enumerateDevicesLocked();
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/CameraDeviceManagerBase_camera.cpp
CameraDeviceManagerBase::
enumerateDevicesLocked(){
    onEnumerateDevicesLocked();
}
CameraDeviceManagerImpl::
onEnumerateDevicesLocked(){
    IHalLogicalDeviceList* pHalDeviceList;
    pHalDeviceList = MAKE_HalLogicalDeviceList();// loads libmtkcam_modulefactory_utils.so
    size_t const deviceNum = pHalDeviceList->searchDevices();
    mVirtEnumDeviceMap.setCapacity(deviceNum*2);
    // Construct one CameraDevice3Impl per enumerated device and keep them in
    // mVirtEnumDeviceMap, distinguished by instanceId.
    for (size_t instanceId = 0; instanceId < deviceNum; instanceId++)
    {
        sp<IMetadataProvider> pMetadataProvider;
        pMetadataProvider = IMetadataProvider::create(instanceId);
        NSMetadataProviderManager::add(instanceId, pMetadataProvider.get());
        addVirtualDevicesLocked(instanceId, pMetadataProvider);// fetch the static metadata for this device id
    }
}
将创建的CameraDevice3Impl对象封装成VirtEnumDevice对象,保存在 mVirtEnumDeviceMap
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/CameraDeviceManagerBase_camera.cpp
// Wraps the created CameraDevice3Impl in a VirtEnumDevice record and stores it
// in mVirtEnumDeviceMap, keyed by the device's instance name.
CameraDeviceManagerBase::
addVirtualDevicesLocked(uint32_t const instanceId,
                                  ::android::sp<IMetadataProvider> pMetadataProvider)
{
    auto add_virtual_device = [&](IVirtualDevice* pVirtualDevice){
            if ( pVirtualDevice != nullptr ) {
                if  ( auto pInfo = new VirtEnumDevice ) {
                    // Devices with a flash unit start with the torch available-but-off.
                    if ( pVirtualDevice->hasFlashUnit() ) {
                        mIsSetTorchModeSupported = true;
                        pInfo->mTorchModeStatus = (uint32_t)ETorchModeStatus::AVAILABLE_OFF;
                    }
                    pInfo->mVirtDevice = pVirtualDevice;
                    pInfo->mInstanceId = pVirtualDevice->getInstanceId();
                    mVirtEnumDeviceMap.add(pVirtualDevice->getInstanceName(), pInfo);
                }
            }
        };
    auto create_and_add_virtual_device = [&](uint32_t majorVersion){
            // Invoke the createVirtualCameraDevice factory (resolved via dlsym
            // in initialize()) to build a CameraDevice3Impl.
            auto create_device = mVirtEnumDeviceFactoryMap[majorVersion].mCreateVirtualCameraDevice;
            CreateVirtualCameraDeviceParams param = {
                .instanceId = static_cast<int32_t>(instanceId),
                .deviceType = mType.c_str(),
                .pMetadataProvider = pMetadataProvider.get(),
                .pDeviceManager = this,

            };
            auto new_device = create_device(&param);
            add_virtual_device(new_device);
        };
    create_and_add_virtual_device(3);

}

创建CameraDevice3Impl对象

alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3Factory.cpp
// Factory function (resolved via dlsym): builds a CameraDevice3Impl for one camera.
createVirtualCameraDevice(CreateVirtualCameraDeviceParams* params){
    auto pDevice = new CameraDevice3Impl(
        params->pDeviceManager,
        params->pMetadataProvider,
        params->deviceType,
        params->instanceId
    );
    // NOTE(review): `info` is not defined in this excerpt — presumably a
    // CreationInfo built from `params`; confirm against the original source file.
    bool bInitSuccess = pDevice->initialize(createCameraDevice3Session(info));
    return pDevice;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3SessionImpl.cpp
// Creates the session object that will own the per-open device state.
NSCam::ICameraDevice3Session*
createCameraDevice3Session(
    NSCam::ICameraDevice3Session::CreationInfo const& info
){
    return new NSCam::v3::CameraDevice3SessionImpl(info);
}

经过以上步骤实现了将多个CameraDevice3Impl并保存到CameraDeviceManager的数组mVirtEnumDeviceMap中,而CameraProvider会保存CameraDeviceManager,这样client端可以通过调用CameraProvider->CameraDeviceManager->CameraDevice3Impl。
=================================================================================================

 

注册cameraserver服务

CameraService会创建CameraProviderManager对象,然后进行initialize
alps/frameworks/av/camera/cameraserver/cameraserver.rc
# init script: starts cameraserver as a system process at boot.
service cameraserver /system/bin/cameraserver
    class main
    user cameraserver
    group audio camera input drmrpc
    ioprio rt 4
    writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks
alps/frameworks/av/camera/cameraserver/main_cameraserver.cpp
// cameraserver process entry: instantiate CameraService and join the binder thread pool.
int main(int argc __unused, char** argv __unused){
    sp<IServiceManager> sm = defaultServiceManager();
    CameraService::instantiate();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}

// Runs when the first strong reference to CameraService is taken:
// enumerate camera providers and ping the camera service proxy.
void CameraService::onFirstRef(){
    BnCameraService::onFirstRef();
    res = enumerateProviders();
    CameraService::pingCameraServiceProxy();
}
status_t CameraService::enumerateProviders() {
    status_t res;
    std::vector<std::string> deviceIds;
    {
        if (nullptr == mCameraProviderManager.get()) {
            // Create the CameraProviderManager and let it connect to the remote providers.
            mCameraProviderManager = new CameraProviderManager();
            res = mCameraProviderManager->initialize(this);
        }
        mCameraProviderManager->setUpVendorTags();
        if (nullptr == mFlashlight.get()) {
            mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
        }
        res = mFlashlight->findFlashUnits();
        deviceIds = mCameraProviderManager->getCameraDeviceIds();
    }
    // Mark every enumerated camera id as PRESENT.
    for (auto& cameraId : deviceIds) {
        String8 id8 = String8(cameraId.c_str());
        onDeviceStatusChanged(id8, CameraDeviceStatus::PRESENT);
    }
}
alps/frameworks/av/services/camera/libcameraservice/common/CameraProviderManager.cpp
// Instance names used to look up the provider services registered with hwservicemanager.
const std::string kLegacyProviderName("legacy/0");
const std::string kExternalProviderName("external/0");

跨进程调用CameraProvider服务
// Cross-process: look up the remote CameraProvider services by instance name.
status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
        ServiceInteractionProxy* proxy) {
    // See if there's a passthrough HAL, but let's not complain if there's not
    addProviderLocked(kLegacyProviderName, /*expected*/ false);
    addProviderLocked(kExternalProviderName, /*expected*/ false);
    return OK;
}

status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
    sp<provider::V2_4::ICameraProvider> interface;
    interface = mServiceProxy->getService(newProvider);// fetch the server-side interface (CameraProviderImpl)

    sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, interface, this);
    status_t res = providerInfo->initialize();
    mProviders.push_back(providerInfo);
}
// Record binding a provider name to its HIDL interface and owning manager.
CameraProviderManager::ProviderInfo::ProviderInfo(
        const std::string &providerName,
        sp<provider::V2_4::ICameraProvider>& interface,
        CameraProviderManager *manager) :
        mProviderName(providerName),
        mInterface(interface),
        mManager(manager) {
}
status_t CameraProviderManager::ProviderInfo::initialize() {
    status_t res = parseProviderName(mProviderName, &mType, &mId); // derive the CameraProviderImpl's type and id from its name
    hardware::Return<Status> status = mInterface->setCallback(this);// register this object as the provider's status callback
    std::vector<std::string> devices;
    // The lambda is the HIDL result callback: it receives idStatus and
    // cameraDeviceNames, and writes them into the captured status/devices.
    hardware::Return<void> ret = mInterface->getCameraIdList([&status, &devices](
            Status idStatus,
            const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
        status = idStatus;
        if (status == Status::OK) {
            for (size_t i = 0; i < cameraDeviceNames.size(); i++) {
               
devices.push_back(cameraDeviceNames[i]);
            }
        } });

    sp<StatusListener> listener = mManager->getStatusListener();
    // Register every reported device name with this provider.
    for (auto& device : devices) {
        std::string id;
        status_t res = addDevice(device,
                hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT, &id);

    }
    return OK;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/provider/2.4/CameraProviderImpl.cpp
// Server side of getCameraIdList: asks the device manager for the device names
// and returns them through the HIDL callback.
CameraProviderImpl::
getCameraIdList(getCameraIdList_cb _hidl_cb){
    std::vector<std::string> deviceNameList;
    auto status = mManager->getDeviceNameList(deviceNameList);// to be confirmed

    // Convert std::vector<std::string> into the HIDL vector type.
    hidl_vec<hidl_string> hidlDeviceNameList;
    hidlDeviceNameList.resize(deviceNameList.size());
    for (size_t i = 0; i < deviceNameList.size(); i++) {
        hidlDeviceNameList[i] = deviceNameList[i];
    }
    _hidl_cb(mapToHidlCameraStatus(status), hidlDeviceNameList);
    return Void();
}
获得 deviceNames 之后,逐个注册设备
status_t CameraProviderManager::ProviderInfo::addDevice(const std::string& name,
        CameraDeviceStatus initialStatus, /*out*/ std::string* parsedId) {
    status_t res = parseDeviceName(name, &major, &minor, &type, &id);// parse version numbers and type out of the device name
    // Walk mProviders to check whether this device already exists; each
    // provider entry may contain several devices.
    if (mManager->isValidDeviceLocked(id, major)) {
        return BAD_VALUE;
    }
    std::unique_ptr<DeviceInfo> deviceInfo;
    // Pick the DeviceInfo template by HAL major version.
    switch (major) {
        case 1:
            deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, mProviderTagid,
                    id, minor);
            break;
        case 3:
            deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, mProviderTagid,
                    id, minor);

            break;
    }
    mDevices.push_back(std::move(deviceInfo));// keep the DeviceInfo in the mDevices vector
    return OK;
}
std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
    CameraProviderManager::ProviderInfo::initializeDeviceInfo(
        const std::string &name, const metadata_vendor_id_t tagId,
        const std::string &id, uint16_t minorVersion){
    auto cameraInterface =
            getDeviceInterface<typename DeviceInfoT::InterfaceT>(name);

    return std::unique_ptr<DeviceInfo>(
        new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
                cameraInterface));// here cameraInterface is the remote CameraDevice3Impl
}
sp<device::V3_2::ICameraDevice>
CameraProviderManager::ProviderInfo::getDeviceInterface
        <device::V3_2::ICameraDevice>(const std::string &name) const {
    sp<device::V3_2::ICameraDevice> cameraInterface;
    // name and the lambda are the arguments; the HIDL callback receives s and
    // interface, writing them into the captured status/cameraInterface.
    ret = mInterface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
        Status s, sp<device::V3_2::ICameraDevice> interface) {
                status = s;
                cameraInterface = interface; // interface = CameraDevice3Impl (server side)
            });
    return cameraInterface;
}

跨进程调用CameraProviderImpl
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/provider/2.4/CameraProviderImpl.cpp
class CameraProviderImpl
    : public ICameraProvider
    , public android::hardware::hidl_death_recipient
    , public NSCam::ICameraDeviceManager::Callback

// Server side of getCameraDeviceInterface_V3_x: delegates to the templated helper.
CameraProviderImpl::
getCameraDeviceInterface_V3_x(
    const hidl_string& cameraDeviceName,
    getCameraDeviceInterface_V3_x_cb _hidl_cb){
    getCameraDeviceInterface<::android::hardware::camera::device::V3_2::ICameraDevice>(cameraDeviceName, _hidl_cb);
    return Void();
}
CameraProviderImpl::
getCameraDeviceInterface(
    const hidl_string& cameraDeviceName,
    InterfaceCallbackT _hidl_cb
){
    ::android::sp<NSCam::ICameraDeviceManager::IBase_t> pBaseDevice(nullptr);
    // Ask the device manager for the device object behind this name.
    auto status = mManager->getDeviceInterface(cameraDeviceName, pBaseDevice);
    auto pICameraDevice = ::android::sp<InterfaceT>(static_cast<InterfaceT*>(pBaseDevice.get()));
    _hidl_cb(mapToHidlCameraStatus(status), pICameraDevice);// invoke the callback: caller's cameraInterface = pICameraDevice
}

从mVirtEnumDeviceMap取出VirtEnumDevice对象,然后获得CameraDevice3Impl对象
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/devicemgr/CameraDeviceManagerBase.cpp
// Looks the VirtEnumDevice up in mVirtEnumDeviceMap and returns its
// CameraDevice3Impl through the IBase interface.
CameraDeviceManagerBase::
getDeviceInterface(
    const std::string& deviceName,
    ::android::sp<IBase_t>& rpDevice
){
    auto const& pInfo = mVirtEnumDeviceMap.valueFor(deviceName);
    auto const& pDevice = pInfo->mVirtDevice; // CameraDevice3Impl
    us_t status = pDevice->getDeviceInterfaceBase(rpDevice);
}
CameraDevice3Impl::
getDeviceInterfaceBase(
    ::android::sp<IBase>& rpDevice
){
    // Hand this device object out as its IBase interface.
    rpDevice = const_cast<IBase*>(static_cast<const IBase*>(this));
    return android::OK;
}
通过以上流程cameraserver包含CameraProviderManager,CameraProviderManager包含mProviders,mProviders包含ProviderInfo,ProviderInfo包含mDevices,mDevices包含DeviceInfo,DeviceInfo包含DeviceInfoT,DeviceInfoT包含CameraDevice3Impl。

总体逻辑顺序:

  1. provider 进程启动,注册;
  2. cameraserver 进程启动,注册,初始化;
  3. cameraserver 获取远端 provider(此时实例化 CameraProvider 并初始化)。

Android P之Camera HAL3流程分析

 

Open阶段

frameworks/base/core/java/android/hardware/camera2/CameraManager.java
    /** Public API entry: opens a camera device using the caller's UID. */
    public void openCamera(@NonNull String cameraId,
            @NonNull final CameraDevice.StateCallback callback, @Nullable Handler handler)
            throws CameraAccessException {
        openCameraForUid(cameraId, callback, CameraDeviceImpl.checkAndWrapHandler(handler),
                USE_CALLING_UID);

    }
    public void openCameraForUid(@NonNull String cameraId,
            @NonNull final CameraDevice.StateCallback callback, @NonNull Executor executor,
            int clientUid) {
        // Fail fast when the camera service has been disabled on this device.
        if (CameraManagerGlobal.sCameraServiceDisabled) {
            throw new IllegalArgumentException("No cameras available on device");
        }
        openCameraDeviceUserAsync(cameraId, callback, executor, clientUid);
    }
frameworks/base/core/java/android/hardware/camera2/CameraManager.java
    private CameraDevice openCameraDeviceUserAsync(String cameraId,
            CameraDevice.StateCallback callback, Executor executor, final int uid)
            throws CameraAccessException {
            android.hardware.camera2.impl.CameraDeviceImpl deviceImpl =
                    new android.hardware.camera2.impl.CameraDeviceImpl(
                        cameraId,
                        callback,
                        executor,
                        characteristics,
                        mContext.getApplicationInfo().targetSdkVersion);

            ICameraDeviceCallbacks callbacks = deviceImpl.getCallbacks();
            try {
                if (supportsCamera2ApiLocked(cameraId)) {
                    // Use cameraservice's cameradeviceclient implementation for HAL3.2+ devices
                    ICameraService cameraService = CameraManagerGlobal.get().getCameraService();
                    if (cameraService == null) {
                        throw new ServiceSpecificException(
                            ICameraService.ERROR_DISCONNECTED,
                            "Camera service is currently unavailable");
                    }
                    // Binder call into CameraService.connectDevice(); returns a CameraDeviceClient.
                    cameraUser = cameraService.connectDevice(callbacks, cameraId,
                            mContext.getOpPackageName(), uid);

                }
            }
            // Store the remote CameraDeviceClient object in the CameraDeviceImpl.
            deviceImpl.setRemoteDevice(cameraUser);
            device = deviceImpl;
            return device;
     }
frameworks/base/core/java/android/hardware/camera2/impl/CameraDeviceImpl.java
    /** Wraps and stores the remote device, links a death recipient, and fires the open callbacks. */
    public void setRemoteDevice(ICameraDeviceUser remoteDevice) throws CameraAccessException {
        synchronized(mInterfaceLock) {
            mRemoteDevice = new ICameraDeviceUserWrapper(remoteDevice);
            IBinder remoteDeviceBinder = remoteDevice.asBinder();
            if (remoteDeviceBinder != null) {
                    remoteDeviceBinder.linkToDeath(this, /*flag*/ 0);
            }
            // Dispatch StateCallback.onOpened / onUnconfigured on the device executor.
            mDeviceExecutor.execute(mCallOnOpened);
            mDeviceExecutor.execute(mCallOnUnconfigured);
        }
    }

跨进程调用CameraManager->CameraService
// Binder entry from CameraManager: creates a CameraDeviceClient for cameraId.
Status CameraService::connectDevice(
        const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
        const String16& cameraId,
        const String16& clientPackageName,
        int clientUid,
        /*out*/
        sp<hardware::camera2::ICameraDeviceUser>* device) {
    String8 id = String8(cameraId);
    sp<CameraDeviceClient> client = nullptr;
    ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
            /*api1CameraId*/-1,
            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
            clientUid, USE_CALLING_PID, API_2,
            /*legacyMode*/ false, /*shimUpdateOnly*/ false,
            /*out*/client);

    *device = client; // client = CameraDeviceClient
    return ret;
}
Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
        int api1CameraId, int halVersion, const String16& clientPackageName, int clientUid,
        int clientPid, apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
        /*out*/sp<CLIENT>& device) {
        // makeClient picks the concrete client class by API level / HAL version.
        if(!(ret = makeClient(this, cameraCb, clientPackageName,
                cameraId, api1CameraId, facing,
                clientPid, clientUid, getpid(), legacyMode,
                halVersion, deviceVersion, effectiveApiLevel,
                /*out*/&tmp)).isOk()
) {
            return ret;
        }
        client = static_cast<CLIENT*>(tmp.get());
        err = client->initialize(mCameraProviderManager, mMonitorTags);
}
Status CameraService::makeClient(const sp<CameraService>& cameraService,
        const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
        int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
        bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
        /*out*/sp<BasicClient>* client) {

    if (halVersion < 0 || halVersion == deviceVersion) {
        // Default path: HAL version is unspecified by caller, create CameraClient
        // based on device version reported by the HAL.
        switch(deviceVersion) {
          case CAMERA_DEVICE_API_VERSION_3_0:
          case CAMERA_DEVICE_API_VERSION_3_1:
          case CAMERA_DEVICE_API_VERSION_3_2:
          case CAMERA_DEVICE_API_VERSION_3_3:
          case CAMERA_DEVICE_API_VERSION_3_4:
            if (effectiveApiLevel == API_1) { // Camera1 API route
                sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
                *client = new Camera2Client(cameraService, tmp, packageName,
                        cameraId, api1CameraId,
                        facing, clientPid, clientUid,
                        servicePid, legacyMode);
            } else { // Camera2 API route
                sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
                        static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
                *client = new CameraDeviceClient(cameraService, tmp, packageName, cameraId,
                        facing, clientPid, clientUid, servicePid);

            }
            break;
        }
    }
}
获得CameraDeviceClient之后进行initialize
frameworks/av/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
status_t CameraDeviceClient::initialize(sp<CameraProviderManager> manager,
        const String8& monitorTags) {
    return initializeImpl(manager, monitorTags);
}
status_t CameraDeviceClient::initializeImpl(TProviderPtr providerPtr, const String8& monitorTags) {
    res = Camera2ClientBase::initialize(providerPtr, monitorTags);
    // Start the frame-processing thread and register for result metadata.
    mFrameProcessor = new FrameProcessorBase(mDevice);
    threadName = String8::format("CDU-%s-FrameProc", mCameraIdStr.string());
    mFrameProcessor->run(threadName.string());
    mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
                                      FRAME_PROCESSOR_LISTENER_MAX_ID,
                                      /*listener*/this,
                                      /*sendPartials*/true);

    // Cache the physical-camera request keys advertised in the static metadata.
    auto deviceInfo = mDevice->info();
    camera_metadata_entry_t physicalKeysEntry = deviceInfo.find(
            ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS);
    if (physicalKeysEntry.count > 0) {
        mSupportedPhysicalRequestKeys.insert(mSupportedPhysicalRequestKeys.begin(),
                physicalKeysEntry.data.i32,
                physicalKeysEntry.data.i32 + physicalKeysEntry.count);
    }
}
frameworks/av/services/camera/libcameraservice/common/Camera2ClientBase.cpp
status_t Camera2ClientBase<TClientBase>::initialize(sp<CameraProviderManager> manager,
        const String8& monitorTags) {
    return initializeImpl(manager, monitorTags);
}
status_t Camera2ClientBase<TClientBase>::initializeImpl(TProviderPtr providerPtr,
        const String8& monitorTags) {
    status_t res;
    res = TClientBase::startCameraOps();
    // mDevice->initialize() opens the HAL session (Camera3Device path below).
    res = mDevice->initialize(providerPtr, monitorTags);
    wp<CameraDeviceBase::NotificationListener> weakThis(this);
    res = mDevice->setNotifyCallback(weakThis);
    return OK;
}
通过CameraService保存的CameraProviderManager对象获得mProviders,调用流程mProviders->DeviceInfo->CameraDevice3Impl->CameraDeviceSession
alps/frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
status_t Camera3Device::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) {
    sp<ICameraDeviceSession> session;
    // Open the HAL session via the provider manager (ends up in CameraDevice3Impl::open).
    status_t res = manager->openSession(mId.string(), this,
            /*out*/ &session);

    // Fetch the fast message queues used for request/result metadata transfer.
    auto requestQueueRet = session->getCaptureRequestMetadataQueue(
        [&queue](const auto& descriptor) {
            queue = std::make_shared<RequestMetadataQueue>(descriptor);
            if (!queue->isValid() || queue->availableToWrite() <= 0) {
                queue = nullptr;
            }
        });
    auto resultQueueRet = session->getCaptureResultMetadataQueue(
        [&resQueue](const auto& descriptor) {
            resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
            if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
                resQueue = nullptr;
            }
        });
    mInterface = new HalInterface(session, queue);
}
status_t CameraProviderManager::openSession(const std::string &id,
        const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
        /*out*/
        sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session) {
    auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});

    auto *deviceInfo3 = static_cast<ProviderInfo::DeviceInfo3*>(deviceInfo);
    // Here mInterface is the remote CameraDevice3Impl.
    ret = deviceInfo3->mInterface->open(callback, [&status, &session]
            (Status s, const sp<device::V3_2::ICameraDeviceSession>& cameraSession) {// to be confirmed: session origin
                status = s;
                if (status == Status::OK) {
                    *session = cameraSession;
                }
            });
    return mapToStatusT(status);
}
CameraProviderManager::ProviderInfo::DeviceInfo* CameraProviderManager::findDeviceInfoLocked(
        const std::string& id,
        hardware::hidl_version minVersion, hardware::hidl_version maxVersion) const {
    // Linear search across all providers for a device matching id and version range.
    for (auto& provider : mProviders) {
        for (auto& deviceInfo : provider->mDevices) {
            if (deviceInfo->mId == id &&
                    minVersion <= deviceInfo->mVersion && maxVersion >= deviceInfo->mVersion) {
                return deviceInfo.get();
            }
        }
    }
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3Impl.cpp
CameraDevice3Impl::
open(const ::android::sp<V3_2::ICameraDeviceCallback>& callback, open_cb _hidl_cb)// to be confirmed: _hidl_cb usage
{
    // Delegate to the session; cast the framework callback up to the V3_4 interface.
    ::android::status_t status = mSession->open(V3_4::ICameraDeviceCallback::castFrom(callback));
    return Void();
}

alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3SessionImpl.cpp
open(
    const ::android::sp<V3_4::ICameraDeviceCallback>& callback
){
    onOpenLocked(callback);
}
onOpenLocked(
    const ::android::sp<V3_4::ICameraDeviceCallback>& callback
){
    mCameraDeviceCallback = callback;
        // The camera app controls the pipeline through AppStreamManager,
        // which also handles the pipeline's callbacks.
        mAppStreamManager = IAppStreamManager::create(
            IAppStreamManager::CreationInfo{
                .mInstanceId            = getInstanceId(),
                .mCameraDeviceCallback  = callback,
                .mMetadataProvider      = mStaticInfo.mMetadataProvider,
                .mMetadataConverter     = mStaticInfo.mMetadataConverter,
                .mErrorPrinter          = std::static_pointer_cast<android::Printer>(mAppStreamManagerErrorState),
                .mWarningPrinter        = std::static_pointer_cast<android::Printer>(mAppStreamManagerWarningState),
                .mDebugPrinter          = std::static_pointer_cast<android::Printer>(mAppStreamManagerDebugState),
            }
        );
    // Create/fetch the pipeline model for this camera instance and open it.
    auto pPipelineModelMgr = IPipelineModelManager::get();
    auto pPipelineModel = pPipelineModelMgr->getPipelineModel( getInstanceId() );
    pPipelineModel->open(getInstanceName().c_str(), this);

    mPipelineModel = pPipelineModel;
    return OK;
}

alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/PipelineModelManagerImpl.cpp
PipelineModelManagerImpl::
getPipelineModel(
    int32_t const openId __unused
){
    // Create a PipelineModelImpl for this openId and cache it in the info record.
    auto pPipelineModel = pPipelineModelInfo->mPipelineModel =
                            PipelineModelImpl::createInstance(
                                PipelineModelImpl::CreationParams{
                                    .openId         = openId
                            });
    return pPipelineModelInfo->mPipelineModel;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/PipelineModelImpl.cpp
PipelineModelImpl::
createInstance(CreationParams const& creationParams)
{
    sp<PipelineModelImpl> pPipeline = new PipelineModelImpl(creationParams); // uses libmtkcam_modulefactory_utils.so
    pPipeline->init();
    return pPipeline;
}
PipelineModelImpl::
open(
    std::string const& userName,
    android::wp<IPipelineModelCallback> const& callback
){
    // Launch an async task to open and power on the camera driver.
    mvOpenFutures.push_back(
        std::async(std::launch::async,
            [this]() {
                return CC_LIKELY( mHalDeviceAdapter!=nullptr )
                    && CC_LIKELY( mHalDeviceAdapter->open() ) // uses libmtkcam_modulefactory_drv.so
                    && CC_LIKELY( mHalDeviceAdapter->powerOn() );
            }
        )

    );
}
PipelineModelImpl::
init(){
    initPipelineStaticInfo(); // read static info from the camera driver
}

经过以上步骤,打开设备CameraDevice3Impl之后,返回CameraDeviceSession。
Android P之Camera HAL3流程分析

为什么要用std::async代替线程的创建

  std::async是为了让用户少费点脑子,它让这三个对象默契地工作。大概的工作过程是这样的:std::async先将异步操作用std::packaged_task包装起来,然后将异步操作的结果放到std::promise中,这个过程就是创造未来的过程。外面再通过future.get/wait来获取这个未来的结果。怎么样,std::async真的是来帮忙的吧——你不用再想到底该怎么用std::future、std::promise和std::packaged_task了,std::async已经帮你搞定一切了!

  现在来看看std::async的原型async(std::launch::async | std::launch::deferred, f, args...),第一个参数是线程的创建策略,有两种策略,默认的策略是立即创建线程:

  • std::launch::async:在调用async就开始创建线程。
  • std::launch::deferred:延迟加载方式创建线程。调用async时不创建线程,直到调用了future的get或者wait时才创建线程。

第二个参数是线程函数,第三个参数是线程函数的参数。
https://www.cnblogs.com/qicosmos/p/3534211.html
https://www.cnblogs.com/diegodu/p/6737973.html

=================================================================================================


Configuration阶段,最终返回PipelineModelSessionDefault对象
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3SessionImpl.cpp
onConfigureStreamsLocked(
    const WrappedStreamConfiguration& requestedConfiguration,
    WrappedHalStreamConfiguration& halConfiguration
){
    auto pAppStreamManager = getSafeAppStreamManager(); // the mAppStreamManager created in onOpenLocked
    auto pPipelineModel = getSafePipelineModel(); // the mPipelineModel created in onOpenLocked
    // Configure the pipeline, then finalize the HAL stream configuration.
    pPipelineModel->configure(pParams);
    pAppStreamManager->endConfigureStreams(halConfiguration);
    return OK;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/PipelineModelImpl.cpp
PipelineModelImpl::
configure(
    std::shared_ptr<UserConfigurationParams>const& params
){
    // Assemble the session creation parameters and build the pipeline session.
    IPipelineModelSessionFactory::CreationParams sessionCfgParams;
    sessionCfgParams.pPipelineStaticInfo      = mPipelineStaticInfo;
    sessionCfgParams.pUserConfigurationParams = params;
    sessionCfgParams.pPipelineModelCallback   = mCallback.promote();
    mSession = IPipelineModelSessionFactory::createPipelineModelSession(sessionCfgParams);
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionFactory.cpp
// Builds the user configuration, the setting policy, and finally the session.
IPipelineModelSessionFactory::
createPipelineModelSession(
    CreationParams const& params __unused
){
    //  (2) convert to UserConfiguration
    auto pUserConfiguration = convertToUserConfiguration(
        *params.pPipelineStaticInfo,
        *params.pUserConfigurationParams
    );
    //  (3) pipeline policy
    auto pSettingPolicy = IPipelineSettingPolicyFactory::createPipelineSettingPolicy(
        IPipelineSettingPolicyFactory::CreationParams{
            .pPipelineStaticInfo        = params.pPipelineStaticInfo,
            .pPipelineUserConfiguration = pUserConfiguration,
    });
    //  (4) pipeline session
    auto pSession = decidePipelineModelSession(params, pUserConfiguration, pSettingPolicy);
}

构造pipeline policy
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/policy/

alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/policy/PipelineSettingPolicyFactoryImpl.cpp
IPipelineSettingPolicyFactory::
createPipelineSettingPolicy(
    CreationParams const& params __unused
){
    return decidePolicyAndMake(params, pPolicyTable, pMediatorTable);
}
decidePolicyAndMake(
    IPipelineSettingPolicyFactory::CreationParams const& params __unused,
    std::shared_ptr<PolicyTable> pPolicyTable __unused,
    std::shared_ptr<MediatorTable> pMediatorTable __unused
){
        return MAKE_PIPELINE_POLICY(PipelineSettingPolicyImpl);
    }
}
// Helper macro: constructs a shared_ptr to the given policy class, forwarding
// the static info, user configuration and the two tables from the enclosing
// scope (params/pPolicyTable/pMediatorTable are captured by name).
#define MAKE_PIPELINE_POLICY(_class_, ...) \
    std::make_shared<_class_>( \
        PipelineSettingPolicyImpl::CreationParams{ \
            .pPipelineStaticInfo        = params.pPipelineStaticInfo, \
            .pPipelineUserConfiguration = params.pPipelineUserConfiguration, \
            .pPolicyTable               = pPolicyTable, \
            .pMediatorTable             = pMediatorTable, \
        })
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/policy/PipelineSettingPolicyImpl.cpp
// Constructor: stores the creation parameters; no other work is done here —
// all policy evaluation happens later via evaluateConfiguration/evaluateRequest.
PipelineSettingPolicyImpl::
PipelineSettingPolicyImpl(
    CreationParams const& creationParams
)
    : IPipelineSettingPolicy()
    , mPipelineStaticInfo(creationParams.pPipelineStaticInfo)
    , mPipelineUserConfiguration(creationParams.pPipelineUserConfiguration)
    , mPolicyTable(creationParams.pPolicyTable)
    , mMediatorTable(creationParams.pMediatorTable)
{
}

构造pipeline session
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionFactory.cpp
// Picks the concrete session type. This excerpt shows only the default path:
// wrap everything into CtorParams and create a PipelineModelSessionDefault.
decidePipelineModelSession(
    IPipelineModelSessionFactory::CreationParams const& creationParams,
    std::shared_ptr<PipelineUserConfiguration>const& pUserConfiguration,
    std::shared_ptr<IPipelineSettingPolicy>const& pSettingPolicy
){
    // Lambda bundling all inputs into the session constructor parameters.
    auto convertTo_CtorParams = [=]() {
        return PipelineModelSessionBase::CtorParams{
            .staticInfo = {
                .pPipelineStaticInfo    = creationParams.pPipelineStaticInfo,
                .pUserConfiguration     = pUserConfiguration,
            },
            .pPipelineModelCallback     = creationParams.pPipelineModelCallback,
            .pPipelineSettingPolicy     = pSettingPolicy,
        };
    };
    return PipelineModelSessionDefault::makeInstance("Default/", convertTo_CtorParams());
}

alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionDefault.cpp
// Creates the default session and immediately configures it; configure()
// drives the whole pipeline-context construction shown later in this article.
makeInstance(
    std::string const& name,
    CtorParams const& rCtorParams __unused
){
    android::sp<ThisNamespace> pSession = new ThisNamespace(name, rCtorParams);
    // NOTE(review): err is unchecked in this excerpt; the full source bails on failure.
    int const err = pSession->configure();
    return pSession;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionBasic.cpp
// Basic-session constructor: appends the camera openId to the session name and
// forwards everything to the base class. (Constructor body elided in excerpt.)
#define ThisNamespace   PipelineModelSessionBasic
ThisNamespace::
ThisNamespace(
    std::string const& name,
    CtorParams const& rCtorParams)
    : PipelineModelSessionBase(
        {name + std::to_string(rCtorParams.staticInfo.pPipelineStaticInfo->openId)},
        rCtorParams)

alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionBase.cpp
// Base-session constructor: caches static info, debug info, the model callback
// and the setting policy for use throughout the session's lifetime.
// NOTE(review): the leading comma shows the first initializer (session name)
// was elided when this excerpt was pasted.
PipelineModelSessionBase::
PipelineModelSessionBase(
    std::string const&& sessionName,
    CtorParams const& rCtorParams
)
    , mStaticInfo(rCtorParams.staticInfo)
    , mDebugInfo(rCtorParams.debugInfo)
    , mPipelineModelCallback(rCtorParams.pPipelineModelCallback)
    , mPipelineSettingPolicy(rCtorParams.pPipelineSettingPolicy)

alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionDefault.cpp
// Default session configure: thin forwarder to the Basic session's configure.
configure(){
    return PipelineModelSessionBasic::configure();
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionBasic.cpp
// Basic configure sequence (pseudo-code in this excerpt): evaluate the
// configuration policy, set up capture, then build the pipeline context.
configure(){
    onConfig_ConfigInfo2()
    onConfig_Capture()
    onConfig_BuildingPipelineContext()
}
// Fills mConfigInfo2 by asking the setting policy to evaluate the
// configuration: streaming/capture feature settings plus the pipeline topology.
onConfig_ConfigInfo2(){
    mConfigInfo2 = std::make_shared<ConfigInfo2>();
    {
        pipelinesetting::ConfigurationOutputParams out{
            .pStreamingFeatureSetting   = &mConfigInfo2->mStreamingFeatureSetting,
            .pCaptureFeatureSetting     = &mConfigInfo2->mCaptureFeatureSetting,
            .pPipelineTopology          = &mConfigInfo2->mPipelineTopology,
        };
        mPipelineSettingPolicy->evaluateConfiguration(out, {});// PipelineSettingPolicyImpl decides the processing nodes and features
    }
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/policy/ConfigSettingPolicyMediator.cpp
// Mediator: runs the configuration-time policies in order (listed as
// pseudo-code here) — feature policy, node need, topology, sensor setting,
// P1 hardware setting, P1 DMA need, then the P1/non-P1 stream-info policies.
evaluateConfiguration(
    ConfigurationOutputParams& out,
    ConfigurationInputParams const& in __unused
){
    mPolicyTable->mFeaturePolicy->evaluateConfiguration
    mPolicyTable->fConfigPipelineNodesNeed
    mPolicyTable->fConfigPipelineTopology
    mPolicyTable->fConfigSensorSetting
    mPolicyTable->fConfigP1HwSetting
    mPolicyTable->fConfigP1DmaNeed
    mPolicyTable->fConfigStreamInfo_P1
    mPolicyTable->fConfigStreamInfo_NonP1
}
创建PipelineContext对象 mCurrentPipelineContext
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionBasic.cpp
// Packs the evaluated topology and feature settings into
// BuildPipelineContextInputParams and builds mCurrentPipelineContext.
onConfig_BuildingPipelineContext(){
    BuildPipelineContextInputParams const in{
        .pipelineName               = getSessionName(),
        .pPipelineTopology          = &mConfigInfo2->mPipelineTopology,
        .pStreamingFeatureSetting   = &mConfigInfo2->mStreamingFeatureSetting,
        .pCaptureFeatureSetting     = &mConfigInfo2->mCaptureFeatureSetting,
    };
    buildPipelineContext(mCurrentPipelineContext, in)
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/utils/PipelineContextBuilder.cpp
// Builds a complete PipelineContext:
//   create -> beginConfigure -> configure streams/nodes/pipeline -> endConfigure.
// The bare Chinese lines below are the article's annotations, kept verbatim.
auto buildPipelineContext(
    android::sp<PipelineContext>& out,
    BuildPipelineContextInputParams const& in
){
    android::sp<PipelineContext> pNewPipelineContext = PipelineContext::create(in.pipelineName.c_str());
    // (annotation) beginConfigure drains the old context via PipelineContextImpl::waitUntilDrained
    调用PipelineContextImpl的waitUntilDrained
    pNewPipelineContext->beginConfigure(
                      in.pOldPipelineContext)
    // (annotation) configure the streams
    配置Streams
    configContextLocked_Streams(
                    pNewPipelineContext,
                    in.pParsedStreamInfo_P1,
                    in.pZSLProvider,
                    in.pParsedStreamInfo_NonP1,
                    &common)
    // (annotation) configure the nodes
    配置Nodes
    configContextLocked_Nodes(
                    pNewPipelineContext,
                    in.pOldPipelineContext,
                    in.pStreamingFeatureSetting,
                    in.pCaptureFeatureSetting,
                    in.pParsedStreamInfo_P1,
                    in.pParsedStreamInfo_NonP1,
                    in.pPipelineNodesNeed,
                    in.pSensorSetting,
                    in.pvP1HwSetting,
                    in.batchSize,
                    &common)
    // (annotation) configure the pipeline topology (roots/edges)
    配置Pipeline
    configContextLocked_Pipeline(
                    pNewPipelineContext,
                    in.pPipelineTopology)

    // (annotation) endConfigure invokes PipelineContextImpl::config
    调用PipelineContextImpl的config
    pNewPipelineContext->endConfigure(
                          bUsingMultiThreadToBuildPipelineContext)
    out = pNewPipelineContext;
}

配置Streams
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextBuilders.cpp
// Signature only (body elided in excerpt): registers the P1 and non-P1 stream
// infos — and the optional ZSL buffer provider — with the pipeline context.
configContextLocked_Streams(
    sp<PipelineContext> pContext,
    std::vector<ParsedStreamInfo_P1> const* pParsedStreamInfo_P1,
    android::sp<IStreamBufferProvider>   pZSLProvider,
    ParsedStreamInfo_NonP1 const* pParsedStreamInfo_NonP1,
    InternalCommonInputParams const* pCommon
)
// Image-stream overload: records the stream type and IImageStreamInfo in the
// pimpl; the stream is actually registered when build() is called.
StreamBuilder::
StreamBuilder(
    eStreamType const type,
    sp<IImageStreamInfo> pStreamInfo
) : mpImpl( new StreamBuilderImpl() ){
    mpImpl->mType             = type;
    mpImpl->mpImageStreamInfo = pStreamInfo;
}
// Meta-stream overload: same as the image overload but for IMetaStreamInfo.
StreamBuilder::
StreamBuilder(
    eStreamType const type,
    sp<IMetaStreamInfo> pStreamInfo
) : mpImpl( new StreamBuilderImpl() ){
    mpImpl->mType            = type;
    mpImpl->mpMetaStreamInfo = pStreamInfo;
}
// Pushes the collected stream description into the context's implementation
// object via PipelineContextImpl::updateConfig.
StreamBuilder::
build(sp<PipelineContext> pContext){
    typedef PipelineContext::PipelineContextImpl        PipelineContextImplT;
    PipelineContextImplT* pContextImpl = pContext->getImpl();
    pContextImpl->updateConfig(mpImpl.get());
}

配置Nodes
// Instantiates every pipeline node the policy asked for: one P1 node per
// sensor, then P2-streaming, P2-capture, FD, JPEG, RAW16 and PDE nodes.
// NOTE(review): useP1NodeCount / bMultiCam_CamSvPath / isReConfig /
// pPipelineStaticInfo are declared in elided parts of the original function.
configContextLocked_Nodes(
    sp<PipelineContext> pContext,
    android::sp<PipelineContext> const& pOldPipelineContext,
    StreamingFeatureSetting const* pStreamingFeatureSetting,
    CaptureFeatureSetting const* pCaptureFeatureSetting,
    std::vector<ParsedStreamInfo_P1> const* pParsedStreamInfo_P1,
    ParsedStreamInfo_NonP1 const* pParsedStreamInfo_NonP1,
    PipelineNodesNeed const* pPipelineNodesNeed,
    std::vector<SensorSetting> const* pSensorSetting,
    std::vector<P1HwSetting> const* pvP1HwSetting,
    uint32_t batchSize,
    InternalCommonInputParams const* pCommon
)
{
    // One P1 (ISP pass-1) node per physical sensor that needs it.
    for(size_t i = 0; i < pPipelineNodesNeed->needP1Node.size(); i++) {
        if (pPipelineNodesNeed->needP1Node[i]) {
            configContextLocked_P1Node(pContext,
                            pOldPipelineContext,
                            pStreamingFeatureSetting,
                            pPipelineNodesNeed,
                            &(*pParsedStreamInfo_P1)[i],
                            pParsedStreamInfo_NonP1,
                            &(*pSensorSetting)[i],
                            &(*pvP1HwSetting)[i],
                            i,
                            batchSize,
                            useP1NodeCount > 1,
                            bMultiCam_CamSvPath,
                            pCommon,
                            isReConfig);
        }
    }
    if( pPipelineNodesNeed->needP2StreamNode ) {
        // P2-streaming node needs to know whether any sensor outputs mono RAW.
        bool hasMonoSensor = false;
        for(auto const v : pPipelineStaticInfo->sensorRawType) {
            if(SENSOR_RAW_MONO == v) {
                hasMonoSensor = true;
                break;
            }
        }
        configContextLocked_P2SNode(pContext,
                            pStreamingFeatureSetting,
                            pParsedStreamInfo_P1,
                            pParsedStreamInfo_NonP1,
                            batchSize,
                            useP1NodeCount,
                            hasMonoSensor,
                            pCommon);
    }
    if( pPipelineNodesNeed->needP2CaptureNode ) {
        configContextLocked_P2CNode(pContext,
                            pCaptureFeatureSetting,
                            pParsedStreamInfo_P1,
                            pParsedStreamInfo_NonP1,
                            useP1NodeCount,
                            pCommon);
    }
    if( pPipelineNodesNeed->needFDNode ) {
        configContextLocked_FdNode(pContext,
                            pParsedStreamInfo_P1,
                            pParsedStreamInfo_NonP1,
                            useP1NodeCount,
                            pCommon);
    }
    if( pPipelineNodesNeed->needJpegNode ) {
        configContextLocked_JpegNode(pContext,
                            pParsedStreamInfo_NonP1,
                            useP1NodeCount,
                            pCommon);
    }
    if( pPipelineNodesNeed->needRaw16Node ) {
        configContextLocked_Raw16Node(pContext,
                            pParsedStreamInfo_P1,
                            pParsedStreamInfo_NonP1,
                            useP1NodeCount,
                            pCommon);
    }
    if( pPipelineNodesNeed->needPDENode ) {
        configContextLocked_PDENode(pContext,
                            pParsedStreamInfo_P1,
                            pParsedStreamInfo_NonP1,
                            useP1NodeCount,
                            pCommon);
    }
}

配置Pipeline
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContext.cpp
// Constructor: all real state lives in the pimpl (PipelineContextImpl).
// (Body elided in this excerpt.)
PipelineContext::
PipelineContext(char const* name)
    : mpImpl( new PipelineContextImpl(name) )

// Writes the pipeline topology (root nodes + node edges) into the context
// through a throwaway PipelineBuilder.
configContextLocked_Pipeline(
    sp<PipelineContext> pContext,
    PipelineTopology const* pPipelineTopology
){
    PipelineBuilder()
    .setRootNode(pPipelineTopology->roots)  // set the root node(s)
    .setNodeEdges(pPipelineTopology->edges) // set the node edges
    .build(pContext)
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextBuilders.cpp
// Constructor: creates the pimpl that accumulates roots/edges until build().
PipelineBuilder::
PipelineBuilder()
    : mpImpl( new PipelineBuilderImpl() )
{
}
// Hands the accumulated topology to PipelineContextImpl::updateConfig.
PipelineBuilder::
build(
    sp<PipelineContext> pContext
){
    typedef PipelineContext::PipelineContextImpl        PipelineContextImplT;
    PipelineContextImplT* pContextImpl = pContext->getImpl();
    pContextImpl->updateConfig(mpImpl.get())
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextImpl.cpp
// Copies the builder's root-node set and edge set into the context's
// pipeline configuration object.
PipelineContext::PipelineContextImpl::
updateConfig(PipelineBuilderImpl* pBuilder)
{
    NodeSet const& rootNodes = pBuilder->mRootNodes;
    NodeEdgeSet const& edges = pBuilder->mNodeEdges;

    // update to context
    mpPipelineConfig->setRootNode(rootNodes);  // set the root node(s)
    mpPipelineConfig->setNodeEdges(edges); // set the node edges
}

alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContext.cpp
// Finishes configuration: forwards to PipelineContextImpl::config, passing the
// old context's impl (if any) so resources can be migrated/reused.
PipelineContext::
endConfigure(MBOOL const parallelConfig){
    getImpl()->config(mpOldContext.get() ? mpOldContext->getImpl() : NULL, parallelConfig);
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextImpl.cpp
// Signature only (body elided): performs the actual (optionally async)
// configuration of streams/nodes declared during beginConfigure..endConfigure.
PipelineContext::PipelineContextImpl::
config(
    PipelineContextImpl* pOldContext,
    MBOOL const isAsync
)


=======================================================================================================


Request阶段,构造 CaptureRequest->UserRequestParams->ParsedAppRequest->IPipelineFrame,最终交给PipelineContextImpl处理
alps/frameworks/av/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
// Binder entry for a single capture request: wraps it in a one-element list
// and defers to submitRequestList.
binder::Status CameraDeviceClient::submitRequest(
        const hardware::camera2::CaptureRequest& request,
        bool streaming,
        /*out*/
        hardware::camera2::utils::SubmitInfo *submitInfo) {
    std::vector<hardware::camera2::CaptureRequest> requestList = { request };// the list holds exactly one request
    return submitRequestList(requestList, streaming, submitInfo);
}
转换请求对象CaptureRequest->PhysicalCameraSettingsList
// Converts each framework CaptureRequest into a PhysicalCameraSettingsList +
// SurfaceMap, tags the metadata with stream ids and the request id, then
// submits: repeating (preview) via setStreamingRequestList, one-shot (capture)
// via captureList.
// NOTE(review): res/err checks and loopCounter's declaration are elided here.
binder::Status CameraDeviceClient::submitRequestList(
        const std::vector<hardware::camera2::CaptureRequest>& requests,
        bool streaming,
        hardware::camera2::utils::SubmitInfo *submitInfo) {
    List<const CameraDeviceBase::PhysicalCameraSettingsList> metadataRequestList;// list of metadata settings
    std::list<const SurfaceMap> surfaceMapList;
    submitInfo->mRequestId = mRequestIdCounter;

    for (auto&& request: requests) {
        SurfaceMap surfaceMap;
        Vector<int32_t> outputStreamIds;
        std::vector<std::string> requestedPhysicalIds;
        if (request.mSurfaceList.size() > 0) {// resolve each Surface to a stream id
            for (sp<Surface> surface : request.mSurfaceList) {
                int32_t streamId;
                sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
                res = insertGbpLocked(gbp, &surfaceMap, &outputStreamIds, &streamId);

                ssize_t index = mConfiguredOutputs.indexOfKey(streamId);
                if (index >= 0) {
                    String8 requestedPhysicalId(
                            mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                    requestedPhysicalIds.push_back(requestedPhysicalId.string());
                }
            }
        } else {
            // No surfaces attached: resolve via pre-registered stream/surface indices.
            for (size_t i = 0; i < request.mStreamIdxList.size(); i++) {
                int streamId = request.mStreamIdxList.itemAt(i);
                int surfaceIdx = request.mSurfaceIdxList.itemAt(i);
                ssize_t index = mConfiguredOutputs.indexOfKey(streamId);
                const auto& gbps = mConfiguredOutputs.valueAt(index).getGraphicBufferProducers();
                res = insertGbpLocked(gbps[surfaceIdx], &surfaceMap, &outputStreamIds, nullptr);
                String8 requestedPhysicalId(mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
                requestedPhysicalIds.push_back(requestedPhysicalId.string());
            }
        }
        // (annotation) build the PhysicalCameraSettingsList
        构造PhysicalCameraSettingsList对象
        CameraDeviceBase::PhysicalCameraSettingsList physicalSettingsList;
        for (const auto& it : request.mPhysicalCameraSettings) {
            String8 physicalId(it.id.c_str());
            if (physicalId != mDevice->getId()) {
                // Settings for a physical sub-camera: keep only supported keys.
                auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(), it.id);

                if (!mSupportedPhysicalRequestKeys.empty()) {
                    CameraMetadata filteredParams(mSupportedPhysicalRequestKeys.size());
                    camera_metadata_t *meta = const_cast<camera_metadata_t *>(
                            filteredParams.getAndLock());
                    set_camera_metadata_vendor_id(meta, mDevice->getVendorTagId());
                    filteredParams.unlock(meta);

                    for (const auto& keyIt : mSupportedPhysicalRequestKeys) {
                        camera_metadata_ro_entry entry = it.settings.find(keyIt);
                        if (entry.count > 0) {
                            filteredParams.update(entry);
                        }
                    }
                    physicalSettingsList.push_back({it.id, filteredParams});
                }
            } else {
                physicalSettingsList.push_back({it.id, it.settings});
            }
        }
        // stamp output/input stream ids and the request id into the metadata
        physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                &outputStreamIds[0], outputStreamIds.size());

        if (request.mIsReprocess) {
            physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_INPUT_STREAMS,
                    &mInputStream.id, 1);
        }
        physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_ID,
                &(submitInfo->mRequestId), /*size*/1);
        loopCounter++; // loopCounter starts from 1
        // queue up this request's metadata and surface map
        metadataRequestList.push_back(physicalSettingsList);
        surfaceMapList.push_back(surfaceMap);
    }
    mRequestIdCounter++;

    if (streaming) {// preview takes this path
        err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
                &(submitInfo->mLastFrameNumber));
        mStreamingRequestId = submitInfo->mRequestId;
    } else {// still capture takes this path
        err = mDevice->captureList(metadataRequestList, surfaceMapList,
                &(submitInfo->mLastFrameNumber));
    }
}
预览
// Preview path: same helper as captureList but with repeating == true.
status_t Camera3Device::setStreamingRequestList(
        const List<const PhysicalCameraSettingsList> &requestsList,
        const std::list<const SurfaceMap> &surfaceMaps, int64_t *lastFrameNumber) {
    return submitRequestsHelper(requestsList, surfaceMaps, /*repeating*/true, lastFrameNumber);
}
拍照
alps/frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
// Still-capture path: same helper as setStreamingRequestList, repeating == false.
status_t Camera3Device::captureList(const List<const PhysicalCameraSettingsList> &requestsList,
                                    const std::list<const SurfaceMap> &surfaceMaps,
                                    int64_t *lastFrameNumber) {
    return submitRequestsHelper(requestsList, surfaceMaps, /*repeating*/false, lastFrameNumber);
}
setStreamingRequestList和captureList方法都调用了submitRequestsHelper方法,只是他们的repeating参数一个为true,一个为false。

// Shared submission helper: converts the metadata lists to CaptureRequest
// objects, then hands them to the request thread — repeating requests replace
// the streaming set, one-shot requests are queued.
status_t Camera3Device::submitRequestsHelper(
        const List<const PhysicalCameraSettingsList> &requests,
        const std::list<const SurfaceMap> &surfaceMaps,
        bool repeating,
        int64_t *lastFrameNumber){
    RequestList requestList;
    // (annotation) convert PhysicalCameraSettingsList -> CaptureRequest
    转换请求对象PhysicalCameraSettingsList->CaptureRequest
    res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
            repeating, /*out*/&requestList);
    if (repeating) {
        res = mRequestThread->setRepeatingRequests(requestList, lastFrameNumber);
    } else {
        res = mRequestThread->queueRequestList(requestList, lastFrameNumber);
    }        
}

入队预览请求
// Replaces the current repeating (preview) request set and wakes the request
// thread so the new set takes effect immediately.
status_t Camera3Device::RequestThread::setRepeatingRequests(
        const RequestList &requests,
        /*out*/
        int64_t *lastFrameNumber) {
    if (lastFrameNumber != NULL) {
        // Report the last frame number of the previous repeating burst.
        *lastFrameNumber = mRepeatingLastFrameNumber;
    }
    mRepeatingRequests.clear();
    mRepeatingRequests.insert(mRepeatingRequests.begin(),
            requests.begin(), requests.end());// place the preview requests into mRepeatingRequests

    unpauseForNewRequests();
    mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
    return OK;
}

入队拍照请求
// Appends one-shot (capture) requests to mRequestQueue and wakes the thread.
status_t Camera3Device::RequestThread::queueRequestList(
        List<sp<CaptureRequest> > &requests,
        /*out*/
        int64_t *lastFrameNumber) {
    for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
        mRequestQueue.push_back(*it); // finally the CaptureRequests land in mRequestQueue
    }
    if (lastFrameNumber != NULL) {
        // Frame number the last queued request will receive.
        *lastFrameNumber = mFrameNumber + mRequestQueue.size() - 1;
    }
    unpauseForNewRequests();
}
处理队列中的预览和拍照请求
// Main loop of the request thread: wait for the next batch (preview or
// capture), build the HAL-level requests/buffers, then submit to the HAL —
// batched if supported, otherwise one by one.
bool Camera3Device::RequestThread::threadLoop() {
    waitForNextRequestBatch();// block until a capture or preview request is available
    prepareHalRequests();// build the HAL captureRequest and its outputBuffers

    // Inform waitUntilRequestProcessed thread of a new request ID
    mLatestRequestId = latestRequestId;
    mLatestRequestSignal.signal();

    if (mInterface->supportBatchRequest()) { // does the HAL accept batched submission?
        submitRequestSuccess = sendRequestsBatch();
    } else {
        submitRequestSuccess = sendRequestsOneByOne();
    }
     return submitRequestSuccess;// returning false breaks the thread loop
}
优先处理拍照请求,预览优先级较低
// Collects the next batch: one main request plus (batchSize - 1) additional
// requests (e.g. high-speed recording); captures take priority over preview.
void Camera3Device::RequestThread::waitForNextRequestBatch() {
    // main frame: fetch and store into mNextRequests
    NextRequest nextRequest;
    nextRequest.captureRequest = waitForNextRequestLocked();
    nextRequest.halRequest = camera3_capture_request_t();
    nextRequest.submitted = false;
    mNextRequests.add(nextRequest);

    // additional frames: only when batchSize > 1 (normally not taken)
    const size_t batchSize = nextRequest.captureRequest->mBatchSize;
    for (size_t i = 1; i < batchSize; i++) {
        NextRequest additionalRequest;
        additionalRequest.captureRequest = waitForNextRequestLocked();
        additionalRequest.halRequest = camera3_capture_request_t();
        additionalRequest.submitted = false;
        mNextRequests.add(additionalRequest);
    }
}
返回拍照或预览需要处理的请求
// Returns the next request to process. Captures in mRequestQueue win; when the
// queue is empty the repeating (preview) set is re-queued; otherwise the
// thread waits (with timeout) and marks itself idle if nothing arrives.
sp<Camera3Device::CaptureRequest>
        Camera3Device::RequestThread::waitForNextRequestLocked() {
    status_t res;
    sp<CaptureRequest> nextRequest;
    while (mRequestQueue.empty()) {// preview is handled only when no capture is pending
        if (!mRepeatingRequests.empty()) {
            // Take the first repeating request now, re-queue the rest.
            const RequestList &requests = mRepeatingRequests;
            RequestList::const_iterator firstRequest = requests.begin();
            nextRequest = *firstRequest;
            mRequestQueue.insert(mRequestQueue.end(),++firstRequest,requests.end());
            mRepeatingLastFrameNumber = mFrameNumber + requests.size() - 1;
            break;
        }

        res = mRequestSignal.waitRelative(mRequestLock, kRequestTimeout);
        if ((mRequestQueue.empty() && mRepeatingRequests.empty()) || exitPending()) {
            // Nothing to do: mark this component idle and bail out.
            if (mPaused == false) {
                mPaused = true;
                sp<StatusTracker> statusTracker = mStatusTracker.promote();
                if (statusTracker != 0) {
                    statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
                }
            }
            return NULL;
        }
    }

    if (nextRequest == NULL) {// capture path: pop from mRequestQueue
        RequestList::iterator firstRequest = mRequestQueue.begin();
        nextRequest = *firstRequest;
        mRequestQueue.erase(firstRequest);// remove the consumed request
        if (mRequestQueue.empty() && !nextRequest->mRepeating) {
            sp<NotificationListener> listener = mListener.promote();
            if (listener != NULL) {
                listener->notifyRequestQueueEmpty();// capture-queue-drained callback
            }
        }
    }
     return nextRequest;
}
构造HAL层captureRequest和输出的outputBuffers
// Builds camera3_capture_request_t and output buffers for each queued request.
// (Body elided in this excerpt.)
status_t Camera3Device::RequestThread::prepareHalRequests() {
}
处理数组中拍照或者预览的请求
// Submits all prepared HAL requests in one batch through the HalInterface.
bool Camera3Device::RequestThread::sendRequestsBatch() {
    status_t res;
    size_t batchSize = mNextRequests.size();
    std::vector<camera3_capture_request_t*> requests(batchSize);
    uint32_t numRequestProcessed = 0;
    for (size_t i = 0; i < batchSize; i++) {
        requests[i] = &mNextRequests.editItemAt(i).halRequest;
    }
    //mInterface = new HalInterface(session, queue);
    res = mInterface->processBatchCaptureRequests(requests, &numRequestProcessed);
}
转换请求对象camera3_capture_request_t->CaptureRequest
status_t Camera3Device::HalInterface::processBatchCaptureRequests(
          std::vector<camera3_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed){
    for (size_t i = 0; i < batchSize; i++) {
        if (hidlSession_3_4 != nullptr) {
            wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_4[i].v3_2,
                    /*out*/&handlesCreated);
        } else {
            wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
        }
    }
    auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
    hidlSession_3_4->proce*tureRequest_3_4(captureRequests_3_4, cachesToRemove,
            [&status, &numRequestProcessed] (auto s, uint32_t n) {
                status = s;
                *numRequestProcessed = n;
            });
    return CameraProviderManager::mapToStatusT(status);
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3SessionImpl.cpp
proce*tureRequest_3_4(const hidl_vec<V3_4::CaptureRequest>& requests, const hidl_vec<BufferCache>& cachesToRemove, proce*tureRequest_3_4_cb _hidl_cb)
{
    uint32_t numRequestProcessed = 0;
    auto status = onProce*tureRequest(requests, cachesToRemove, numRequestProcessed);
    _hidl_cb(mapToHidlCameraStatus(status), numRequestProcessed);
    return Void();
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/device/CameraDevice3SessionImpl.cpp
ThisNamespace::
onProce*tureRequest(
    const hidl_vec<V3_4::CaptureRequest>& requests,
    const hidl_vec<BufferCache>& cachesToRemove,
    uint32_t& numRequestProcessed
){
    ::android::Vector<IAppStreamManager::Request> appRequests;
    auto pAppStreamManager = getSafeAppStreamManager();
    pAppStreamManager->submitRequest(requests, appRequests);

    std::vector<std::shared_ptr<UserRequestParams>> vPipelineRequests(appRequests.size());
    for ( size_t i=0; i<appRequests.size(); ++i ) {
        auto& pItem = vPipelineRequests[i];
        pItem = std::make_shared<UserRequestParams>();

        pItem->requestNo = appRequests[i].frameNo;
        _CLONE_(pItem->vIImageBuffers,    appRequests[i].vInputImageBuffers);
        _CLONE_(pItem->vOImageBuffers,    appRequests[i].vOutputImageBuffers);
        _CLONE_(pItem->vIMetaBuffers,     appRequests[i].vInputMetaBuffers);
    }
    auto pPipelineModel = getSafePipelineModel();
    pPipelineModel->submitRequest(vPipelineRequests, numRequestProcessed);
}
将请求对象CaptureRequest->UserRequestParams
alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/app/AppStreamMgr.cpp
// Thin forwarder: the request handler turns HIDL CaptureRequests into
// IAppStreamManager::Request objects (out-parameter rRequests).
AppStreamMgr::
submitRequest(
    const hidl_vec<V3_4::CaptureRequest>& captureRequests,
    android::Vector<Request>& rRequests
){
    mRequestHandler->submitRequest(captureRequests, rRequests);
}
调用PipelineModelSessionDefault对UserRequestParams进行处理
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/PipelineModelImpl.cpp
// Forwards the user requests to the session created during configure()
// (PipelineModelSessionDefault in the default path).
PipelineModelImpl::
submitRequest(
    std::vector<std::shared_ptr<UserRequestParams>>const& requests,
    uint32_t& numRequestProcessed
){
    session = mSession;
    session->submitRequest(requests, numRequestProcessed);
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionBase.cpp
// Parses each UserRequestParams into a ParsedAppRequest, then submits the
// parsed requests one by one, counting how many were processed.
PipelineModelSessionBase::
submitRequest(
    std::vector<std::shared_ptr<UserRequestParams>>const& requests,
    uint32_t& numRequestProcessed
){
    //Convert: UserRequestParams -> ParsedAppRequest
    for (size_t i = 0; i < requests.size(); i++) {
        auto r = std::make_shared<ParsedAppRequest>();
        parseAppRequest(r.get(), requests[i].get() );// build the parsed app-request object
        reqs.emplace_back(r);
    }

    //Submit ParsedAppRequest one by one
    for (size_t i = 0; i < reqs.size(); i++, numRequestProcessed++) {
        submitOneRequest(reqs[i]);
    }
}
// Per-request pipeline (pseudo-code in this excerpt): evaluate the request
// against the policy, reconfigure if needed, process the evaluated frames,
// then finish the request.
submitOneRequest(
    std::shared_ptr<ParsedAppRequest>const& request __unused
){
    auto const requestNo = request->requestNo;
    auto pConfigInfo2 = getCurrentConfigInfo2();
    auto pReqOutputParams = std::make_shared<pipelinesetting::RequestOutputParams>();
    onRequest_EvaluateRequest(*pReqOutputParams, request, pConfigInfo2)
    onRequest_Reconfiguration(pConfigInfo2, *pReqOutputParams, request)
    onRequest_ProcessEvaluatedFrame(*pReqOutputParams, request, pConfigInfo2)
    onRequest_Ending(*pReqOutputParams)
}
// Snapshots the original app control metadata and hands the evaluated request
// to processEvaluatedFrame together with the current pipeline context.
onRequest_ProcessEvaluatedFrame(
    policy::pipelinesetting::RequestOutputParams const& reqOutput __unused,
    std::shared_ptr<ParsedAppRequest>const& pRequest __unused,
    std::shared_ptr<ConfigInfo2>const& pConfigInfo2 __unused
){
    auto pAppMetaControl = std::make_shared<IMetadata>(); // original app control
    *pAppMetaControl = *autoAppMetaControl.get();

    auto pPipelineContext = getCurrentPipelineContext(); // fetch mCurrentPipelineContext
    int res = processEvaluatedFrame(reqOutput,
                                    pAppMetaControl,
                                    pRequest,
                                    pConfigInfo2,
                                    pPipelineContext);
}
// Processes every frame the policy produced for one request, in order:
// pre-dummy frames, the main frame, sub frames, post-dummy frames. Afterwards
// it tracks in-flight JPEG captures and applies any scenario boost controls.
processEvaluatedFrame(
    policy::pipelinesetting::RequestOutputParams const& reqOutput,
    std::shared_ptr<IMetadata> pAppMetaControl,
    std::shared_ptr<ParsedAppRequest> request,
    std::shared_ptr<ConfigInfo2> pConfigInfo2,
    android::sp<PipelineContext> pPipelineContext
){
    // process each frame
    uint32_t lastFrameNo = 0;
    // Helper closure: builds and queues one IPipelineFrame of the given type.
    auto processFrame = [&] (pipelinesetting::RequestResultParams const& result, int frameType) -> int
    {
        return processOneEvaluatedFrame(
            lastFrameNo,
            frameType,
            result,
            reqOutput,
            pAppMetaControl,
            request,
            pConfigInfo2,
            pPipelineContext
        );
    };

    // pre-dummy frames
    for (auto const& frame : reqOutput.preDummyFrames) {
        processFrame(*frame, eFRAMETYPE_PREDUMMY);
    }
    // main frame
    processFrame(*(reqOutput.mainFrame), eFRAMETYPE_MAIN);

    // sub frames
    for (auto const& frame : reqOutput.subFrames) {
        processFrame(*frame, eFRAMETYPE_SUB);
    }
    // post-dummy frames
    for (auto const& frame : reqOutput.postDummyFrames) {
        processFrame(*frame, eFRAMETYPE_POSTDUMMY);
    }

    if (reqOutput.mainFrame->nodesNeed.needJpegNode)
    {
        // A JPEG capture is in flight: register it for tracking/notification.
        auto pCaptureInFlightRequest = getCaptureInFlightRequest();
        if ( pCaptureInFlightRequest != nullptr ) {
            pCaptureInFlightRequest->insertRequest(request->requestNo, eMSG_INFLIGHT_NORMAL);
        }
    }
    for (auto& control : reqOutput.vboostControl)
    {
        if (control.boostScenario != -1 && control.boostScenario != (int32_t)IScenarioControlV3::Scenario_None)
        {
            mpScenarioCtrl->boostScenario(control.boostScenario, control.featureFlag, lastFrameNo);
        }
    }
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/session/PipelineModelSessionDefault.cpp
// Builds one IPipelineFrame from the evaluated result and queues it on the
// pipeline context (asynchronously handled by PipelineContextImpl). Only the
// main frame carries the app image buffers and the frame callback.
processOneEvaluatedFrame(
    uint32_t& lastFrameNo,
    uint32_t frameType,
    policy::pipelinesetting::RequestResultParams const& reqResult,
    policy::pipelinesetting::RequestOutputParams const& reqOutput,
    std::shared_ptr<IMetadata> pAppMetaControl,
    std::shared_ptr<ParsedAppRequest> request,
    std::shared_ptr<ConfigInfo2> pConfigInfo2,
    android::sp<PipelineContext> pPipelineContext
){
    BuildPipelineFrameInputParams const params = {
        .requestNo = request->requestNo,
        .pAppImageStreamBuffers = (frameType == eFRAMETYPE_MAIN ? request->pParsedAppImageStreamBuffers.get() : nullptr),
        .pAppMetaStreamBuffers  = (vAppMeta.empty() ? nullptr : &vAppMeta),
        .pHalImageStreamBuffers = nullptr,
        .pHalMetaStreamBuffers  = (vHalMeta.empty() ? nullptr : &vHalMeta),
        .pvUpdatedImageStreamInfo = &(reqResult.vUpdatedImageStreamInfo),
        .pnodeSet = &reqResult.nodeSet,
        .pnodeIOMapImage = &(reqResult.nodeIOMapImage),
        .pnodeIOMapMeta = &(reqResult.nodeIOMapMeta),
        .pRootNodes = &(reqResult.roots),
        .pEdges = &(reqResult.edges),
        .pCallback = (frameType == eFRAMETYPE_MAIN ? this : nullptr),
        .pPipelineContext = pPipelineContext
    };
        // check pending request
        if ((frameType == eFRAMETYPE_MAIN)&&(pZslProcessor->hasPendingZslRequest()))
        {
            // ZSL path: defer the build params and submit the ZSL request instead.
            enqueZslBuildFrameParam(params, frameType);
            submitZslReq(reqOutput, pPipelineContext, lastFrameNo);
        }
        // build the IPipelineFrame from ParsedAppRequest plus the evaluated info
        android::sp<IPipelineFrame> pPipelineFrame;
        buildPipelineFrame(pPipelineFrame, params);// pPipelineFrame = PipelineBufferSetFrameControlImp

        if ((frameType == eFRAMETYPE_PREDUMMY) || (frameType == eFRAMETYPE_POSTDUMMY) || (frameType == eFRAMETYPE_SUB))
            pZslProcessor->setBufferEnqueCnt(pPipelineFrame->getFrameNo(), mZSLConfigStreamCnt, 0);
        lastFrameNo = pPipelineFrame->getFrameNo();
        pPipelineContext->queue(pPipelineFrame); // async: handed off to PipelineContextImpl
}
构造 IPipelineFrame
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/model/utils/PipelineFrameBuilder.cpp
auto buildPipelineFrame(
    android::sp<IPipelineFrame>& out __unused,
    BuildPipelineFrameInputParams const& in __unused
){
    // Translates the pre-computed per-request parameters (reprocess flag,
    // DAG roots/edges, per-node IOMaps, frame callback) into a concrete
    // IPipelineFrame, returned through `out`.
    RequestBuilder builder; // all real work is delegated to a RequestBuilderImpl object
    builder.setReprocessFrame(in.bReprocessFrame);
    builder.setRootNode( *in.pRootNodes );
    builder.setNodeEdges( *in.pEdges );
    // IOMap of Image/Meta
    for ( auto key : *(in.pnodeSet) ) {
        auto const& it_image = in.pnodeIOMapImage->find(key);
        auto const& it_meta  = in.pnodeIOMapMeta->find(key);
        // Nodes without an explicit image/meta IOMap are still registered,
        // just with an empty map, so every node in the set is configured.
        builder.setIOMap(
                key,
                (it_image !=in.pnodeIOMapImage->end() ) ? it_image->second : IOMapSet::empty(),
                (it_meta !=in.pnodeIOMapMeta->end() )   ? it_meta->second  : IOMapSet::empty()
            );
    }
    sp<IPipelineFrame> pFrame = builder
        .updateFrameCallback(in.pCallback)
        .build(in.requestNo, in.pPipelineContext);
    out = pFrame;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextBuilders.cpp
// Default constructor. RequestBuilder is a thin pimpl wrapper: all builder
// state and the construction logic live in RequestBuilderImpl.
RequestBuilder::
RequestBuilder()
    : mpImpl(new RequestBuilderImpl())
{
}
RequestBuilder::
build(
    MUINT32 const requestNo,
    sp<PipelineContext> pContext
){
    typedef PipelineContext::PipelineContextImpl        PipelineContextImplT;
    PipelineContextImplT* pContextImpl = pContext->getImpl();
    sp<IPipelineFrame> pFrame = pContextImpl->constructRequest(mpImpl.get(), requestNo);
    return pFrame;
}
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextImpl.cpp
// Builds the concrete IPipelineFrame for one capture request from the
// settings accumulated in the RequestBuilderImpl (IOMaps, DAG roots/edges,
// app callback).
// NOTE(review): this excerpt elides the derivation of frameNo,
// pReqStreamInfoSet and aPhysicalCameraSettings - confirm against the full
// source (PipelineContextImpl.cpp).
PipelineContext::PipelineContextImpl::
constructRequest(RequestBuilderImpl* pBuilder, MUINT32 const requestNo){
    MBOOL const& bReprocessFrame = pBuilder->mbReprocessFrame;
    NodeIOMaps const& aImageIOMaps  = pBuilder->mImageNodeIOMaps;
    NodeIOMaps const& aMetaIOMaps   = pBuilder->mMetaNodeIOMaps;
    NodeEdgeSet const& aEdges       = pBuilder->mNodeEdges;
    NodeSet const& aRoots           = pBuilder->mRootNodes;
    wp<AppCallbackT> const& aAppCallback = pBuilder->mpCallback;

    typedef IPipelineBufferSetFrameControl          PipelineFrameT;
    // The frame object (PipelineBufferSetFrameControlImp) is the per-request
    // controller for stream buffers and result callbacks.
    sp<PipelineFrameT> pFrame = PipelineFrameT::create(
        requestNo,
        frameNo,
        bReprocessFrame,
        aAppCallback, // IAppCallback
        mpStreamConfig.get(), //  IPipelineStreamBufferProvider
        mpDispatcher // IPipelineNodeCallback
    );
    // Construct the per-request PipelineDAG object, restricted to this
    // request's root nodes and edges.
    sp<IPipelineDAG> pReqDAG = constructDAG(
            mpPipelineDAG.get(),
            aRoots,
            aEdges
            );
    // Build the node map: associates each node of the request DAG with its
    // image/meta IOMaps and stream info.
    sp<IPipelineFrameNodeMapControl> pReqFrameNodeMap;
    {
        sp<IPipelineFrameNodeMapControl> pFrameNodeMap = IPipelineFrameNodeMapControl::create();
        construct_FrameNodeMapControl::Params params = {
        .pImageNodeIOMaps  = &aImageIOMaps,
        .pMetaNodeIOMaps   = &aMetaIOMaps,
        .pReqDAG           = pReqDAG.get(),
        .pReqStreamInfoSet = pReqStreamInfoSet.get(),
        .pMapControl       = pFrameNodeMap.get()
        };
        construct_FrameNodeMapControl()(params);
        pReqFrameNodeMap = pFrameNodeMap;
    }
    // Attach all per-request state to the frame, then freeze its configuration.
    pFrame->setPipelineNodeMap (mpPipelineNodeMap.get());
    pFrame->setNodeMap         (pReqFrameNodeMap);
    pFrame->setPipelineDAG     (pReqDAG);
    pFrame->setStreamInfoSet   (pReqStreamInfoSet);
    pFrame->setPhysicalCameraSetting(aPhysicalCameraSettings);
    pFrame->finishConfiguration();
    return pFrame;
}
实现IPipelineFrame的功能
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineBufferSetFrameControlImp.cpp
#define MAIN_CLASS_NAME PipelineBufferSetFrameControlImp
// Factory for the concrete per-request frame-control object that implements
// IPipelineFrame (PipelineBufferSetFrameControlImp).
//
// @param requestNo        app-visible request number
// @param frameNo          pipeline-internal frame number
// @param bReprocessFrame  true for a reprocess (input-buffer) request
//                         (fixed typo: was "bReporcessFrame")
// @param pAppCallback     weak ref used to deliver results back to the app layer
// @param pBufferProvider  acquires stream buffers on demand
// @param pNodeCallback    pipeline-node notification sink
IPipelineBufferSetFrameControl*
IPipelineBufferSetFrameControl::
create(
    MUINT32 requestNo,
    MUINT32 frameNo,
    MBOOL bReprocessFrame,
    android::wp<IAppCallback>const& pAppCallback,
    IPipelineStreamBufferProvider const* pBufferProvider,
    android::wp<IPipelineNodeCallback> pNodeCallback
)
{
    return new MAIN_CLASS_NAME(requestNo, frameNo, bReprocessFrame, pAppCallback, pBufferProvider, pNodeCallback);
}

处理拍照或者预览请求,返回的错误不会进行处理
alps/vendor/mediatek/proprietary/hardware/mtkcam3/pipeline/pipeline/PipelineContextImpl.cpp
PipelineContext::PipelineContextImpl::
queue( sp<IPipelineFrame> const& pFrame ){
    mpInFlightRequest->registerRequest(pFrame);
    sp<IPipelineNodeMap const> pPipelineNodeMap = pFrame->getPipelineNodeMap();
    Vector<IPipelineDAG::NodeObj_T> const RootNodeObjSet = pFrame->getPipelineDAG().getRootNode();
    Vector<sp<IPipelineNode>> vspPipelineNode;
    {
        for(size_t i=0; i<RootNodeObjSet.size();i++){
            sp<IPipelineNode> pNode = pPipelineNodeMap->nodeAt(RootNodeObjSet[i].val);
            if( mInFlush ) {
                err = pNode->flush(pFrame);
            } else {
                err = pNode->queue(pFrame);
                if(err == FAILED_TRANSACTION){
                    vspPipelineNode.push_back(pNode);
                }
            }
        }
    }
    while(vspPipelineNode.size() != 0){
        {
            // wake up and enque to root node which can not be enqued last time
            Vector<sp<IPipelineNode>>::iterator it = vspPipelineNode.begin();
            while(it != vspPipelineNode.end()){
                if( mInFlush ) {
                    err = (*it)->flush(pFrame);
                } else {
                    err = (*it)->queue(pFrame);
                }
            }
        }
    }
}


=======================================================================================================

 

返回结果阶段


Capture、preview以及autoFocus都使用这个回调。Capture调用时,其RequestTag为CAPTURE;
autoFocus调用时为TAP_TO_FOCUS;而preview请求没有设置RequestTag,所以回调到onCaptureStarted方法时,
不需要进行处理。但到此时,preview已经启动成功,可以进行预览了,其数据都在buffer里。

alps/vendor/mediatek/proprietary/hardware/mtkcam3/main/hal/device/3.x/app/AppStreamMgr.CallbackHandler.cpp
threadLoop(){
    if ( ! waitUntilQueue1NotEmpty() ) {
        return true;
    }
    mQueue2.splice(mQueue2.end(), mQueue1);
    mQueue1Cond.broadcast();

    performCallback();
    return  true;
}
performCallback(){
    {
        for (auto const& cbParcel : mQueue2) {
            convertShutterToHidl(cbParcel, vNotifyMsg);
            convertErrorToHidl(cbParcel, vErrorMsg);
            convertMetaToHidl(cbParcel, vCaptureResult, vTempMetadataResult);
            convertImageToHidl(cbParcel, vBufferResult);
        }
        if  ( auto pResultMetadataQueue = mResultMetadataQueue ) {
            for (auto& item : vCaptureResult) {
                if  ( item.v3_2.result.size() == 0 ) {
                    continue;
                }
                if  ( pResultMetadataQueue->availableToWrite() > 0 ) {
                    if  ( CC_LIKELY(pResultMetadataQueue->write(item.v3_2.result.data(), item.v3_2.result.size())) ) {
                        item.v3_2.fmqResultSize = item.v3_2.result.size();
                        item.v3_2.result = hidl_vec<uint8_t>(); //resize(0)
                    }
                    else {
                        item.v3_2.fmqResultSize = 0;
                    }
                }
            }
        }
        //  send callbacks
        {
            if  ( ! vNotifyMsg.empty() ) {
                hidl_vec<NotifyMsg> vecNotifyMsg;
                vecNotifyMsg.setToExternal(vNotifyMsg.data(), vNotifyMsg.size());
                auto ret1 = mCommonInfo->mDeviceCallback->notify(vecNotifyMsg);
            }
            if  ( ! vCaptureResult.empty() ) {
                hidl_vec<V3_4::CaptureResult> vecCaptureResult;
                vecCaptureResult.setToExternal(vCaptureResult.data(), vCaptureResult.size());
                auto ret2 = mCommonInfo->mDeviceCallback->proce*tureResult_3_4(vecCaptureResult);
            }
            if  ( ! vErrorMsg.empty() ) {
                hidl_vec<NotifyMsg> vecErrorMsg;
                vecErrorMsg.setToExternal(vErrorMsg.data(), vErrorMsg.size());
                auto ret1 = mCommonInfo->mDeviceCallback->notify(vecErrorMsg);
            }
            if  ( ! vBufferResult.empty() ) {
                hidl_vec<V3_4::CaptureResult> vecBufferResult;
                vecBufferResult.setToExternal(vBufferResult.data(), vBufferResult.size());
                auto ret2 = mCommonInfo->mDeviceCallback->proce*tureResult_3_4(vecBufferResult);
            }
        }
        //  free the memory of camera_metadata.
        for (auto& v : vTempMetadataResult) {
            mCommonInfo->mMetadataConverter->freeCameraMetadata(v);
            v = nullptr;
        }
    }
}
alps/frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
// HIDL entry point for result delivery from the HAL (ICameraDeviceCallback
// v3.4). FIX: restored the garbled identifier "proce*tureResult_3_4" to
// processCaptureResult_3_4.
hardware::Return<void> Camera3Device::processCaptureResult_3_4(
        const hardware::hidl_vec<
                hardware::camera::device::V3_4::CaptureResult>& results)

// Per-result helper invoked for each element of `results`; also receives
// the per-physical-camera metadata attached by the HAL.
void Camera3Device::processOneCaptureResultLocked(
        const hardware::camera::device::V3_2::CaptureResult& result,
        const hardware::hidl_vec<
                hardware::camera::device::V3_4::PhysicalCameraMetadata> physicalCameraMetadatas)

void Camera3Device::proce*tureResult(const camera3_capture_result *result) {
    uint32_t frameNumber = result->frame_number;
    CameraMetadata collectedPartialResult;
    {
        ssize_t idx = mInFlightMap.indexOfKey(frameNumber);
        InFlightRequest &request = mInFlightMap.editValueAt(idx);
        //处理部分的结果
        if (result->partial_result != 0)
            request.resultExtras.partialResultCount = result->partial_result;

        if (mUsePartialResult && result->result != NULL) {
            isPartialResult = (result->partial_result < mNumPartialResults);
            if (isPartialResult) {
                request.collectedPartialResult.append(result->result);
            }
            if (isPartialResult && request.hasCallback) {
                sendPartialCaptureResult(result->result, request.resultExtras,
                        frameNumber);
            }
        }
        shutterTimestamp = request.shutterTimestamp;
        hasInputBufferInRequest = request.hasInputBuffer;

        // 保存完整的结果
        if (result->result != NULL && !isPartialResult) {
            for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
                String8 physicalId(result->physcam_ids[i]);
                std::set<String8>::iterator cameraIdIter =
                        request.physicalCameraIds.find(physicalId);
                if (cameraIdIter != request.physicalCameraIds.end()) {
                    request.physicalCameraIds.erase(cameraIdIter);
                }
            }
            if (mUsePartialResult && !request.collectedPartialResult.isEmpty()) {
                collectedPartialResult.acquire(request.collectedPartialResult);
            }
            request.haveResultMetadata = true;
        }

        uint32_t numBuffersReturned = result->num_output_buffers;
        if (result->input_buffer != NULL) {
            if (hasInputBufferInRequest) {
                numBuffersReturned += 1;
            }
        }
        request.numBuffersLeft -= numBuffersReturned;

        //等待shutter消息再处理输出Buffer
        if (shutterTimestamp == 0) {
            request.pendingOutputBuffers.appendArray(result->output_buffers,
                result->num_output_buffers);
        } else {
            returnOutputBuffers(result->output_buffers, result->num_output_buffers, shutterTimestamp);
        }
        // 发送完整的结果
        if (result->result != NULL && !isPartialResult) {
            for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
                CameraMetadata physicalMetadata;
                physicalMetadata.append(result->physcam_metadata[i]);
                request.physicalMetadatas.push_back({String16(result->physcam_ids[i]),
                        physicalMetadata});
            }
            if (shutterTimestamp == 0) {
                request.pendingMetadata = result->result;
                request.collectedPartialResult = collectedPartialResult;
           } else if (request.hasCallback) {
                CameraMetadata metadata;
                metadata = result->result;
                sendCaptureResult(metadata, request.resultExtras,
                    collectedPartialResult, frameNumber,
                    hasInputBufferInRequest, request.physicalMetadatas);
            }
        }
        removeInFlightRequestIfReadyLocked(idx);
    }

    if (result->input_buffer != NULL) {
        if (hasInputBufferInRequest) {
            Camera3Stream *stream =
                Camera3Stream::cast(result->input_buffer->stream);
            res = stream->returnInputBuffer(*(result->input_buffer));
        }
    }
}

 

=======================================================================================================

返回通知
alps/frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
void Camera3Device::notify(const camera3_notify_msg *msg) {
    listener = mListener.promote();
    switch (msg->type) {
        case CAMERA3_MSG_ERROR: {
            notifyError(msg->message.error, listener);
            break;
        }
        case CAMERA3_MSG_SHUTTER: {
            notifyShutter(msg->message.shutter, listener);
            break;
        }
    }
}
// Records the shutter (start-of-exposure) timestamp on the in-flight request
// and releases everything that was parked waiting for it: the pending result
// metadata and the pending output buffers.
void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
        sp<NotificationListener> listener) {
    {
        idx = mInFlightMap.indexOfKey(msg.frame_number);
        if (idx >= 0) {
            InFlightRequest &r = mInFlightMap.editValueAt(idx);
            r.shutterTimestamp = msg.timestamp;
            if (r.hasCallback) {
                if (listener != NULL) {
                    // Invoke the listener's notifyShutter
                    // (CameraDeviceClient -> app onCaptureStarted callback).
                    listener->notifyShutter(r.resultExtras, msg.timestamp);
                }
                // Push the parked result metadata into the result queue.
                sendCaptureResult(r.pendingMetadata, r.resultExtras,
                    r.collectedPartialResult, msg.frame_number,
                    r.hasInputBuffer, r.physicalMetadatas);
            }
            // Output buffers held back until shutter can be returned now.
            returnOutputBuffers(r.pendingOutputBuffers.array(),
                r.pendingOutputBuffers.size(), r.shutterTimestamp);

            removeInFlightRequestIfReadyLocked(idx);
        }
    }
}
调用客户端监听的notifyShutter
void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras,
        nsecs_t timestamp) {
    // Thread safe. Don't bother locking.
    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
    if (remoteCb != 0) {
        remoteCb->onCaptureStarted(resultExtras, timestamp);
    }
    Camera2ClientBase::notifyShutter(resultExtras, timestamp);
}
// Binder callback from CameraDeviceClient::notifyShutter(): the capture
// request identified by resultExtras has started exposing. Looks up the
// CaptureCallbackHolder registered for the request id and posts the
// app-visible onCaptureStarted() to its handler.
// NOTE(review): the "......" marks code elided from this excerpt (null
// checks on `holder`, etc.) - see the full CameraDeviceImpl source.
public void onCaptureStarted(final CaptureResultExtras resultExtras, final long timestamp) {
    int requestId = resultExtras.getRequestId();
    final long frameNumber = resultExtras.getFrameNumber();
    final CaptureCallbackHolder holder;
    synchronized(mInterfaceLock) {
        if (mRemoteDevice == null) return; // Camera already closed
        // Get the callback for this frame ID, if there is one
        holder = CameraDeviceImpl.this.mCaptureCallbackMap.get(requestId);
        ......
        // Dispatch capture start notice
        holder.getHandler().post(new Runnable() {
            @Override
            public void run() {
                if (!CameraDeviceImpl.this.isClosed()) {
                    // Deliver the app-visible CaptureCallback.onCaptureStarted().
                    holder.getCallback().onCaptureStarted(CameraDeviceImpl.this,holder.getRequest(
                        resultExtras.getSubsequenceId()),timestamp, frameNumber);
                }
           }
       });
    }
}

将结果插入到队列
void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata,
        CaptureResultExtras &resultExtras,
        CameraMetadata &collectedPartialResult,
        uint32_t frameNumber,
        bool reprocess,
        const std::vector<PhysicalCaptureResultInfo>& physicalMetadatas) {
    CaptureResult captureResult;
    captureResult.mResultExtras = resultExtras;
    captureResult.mMetadata = pendingMetadata;
    captureResult.mPhysicalMetadatas = physicalMetadatas;

    insertResultLocked(&captureResult, frameNumber);
}
// Stamps the vendor-tag id on the result's metadata and appends a copy of
// *result to mResultQueue. (frameNumber is unused in this excerpt.)
void Camera3Device::insertResultLocked(CaptureResult *result,
        uint32_t frameNumber) {
    // getAndLock() hands back a const buffer; const_cast is required because
    // set_camera_metadata_vendor_id() mutates it in place.
    camera_metadata_t *meta = const_cast<camera_metadata_t *>(
            result->mMetadata.getAndLock());
    set_camera_metadata_vendor_id(meta, mVendorTagId);
    // NOTE(review): the matching mMetadata.unlock(meta) appears to be elided
    // from this excerpt - confirm against the full source.

    // Valid result, insert into queue
    List<CaptureResult>::iterator queuedResult =
            mResultQueue.insert(mResultQueue.end(), CaptureResult(*result));
}
返回Buffer
void Camera3Device::returnOutputBuffers(
        const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
        nsecs_t timestamp) {
    for (size_t i = 0; i < numBuffers; i++)
    {
        Camera3Stream *stream = Camera3Stream::cast(outputBuffers[i].stream);
        status_t res = stream->returnBuffer(outputBuffers[i], timestamp);
    }
}
消除没有处理完的请求
// Finalizes an in-flight entry once it is "ready": no output buffers
// outstanding AND its result metadata has been delivered (or explicitly
// skipped). Also warns when the in-flight list grows suspiciously large.
// (frameNumber and sensorTimestamp are unused in this excerpt.)
void Camera3Device::removeInFlightRequestIfReadyLocked(int idx) {
    const InFlightRequest &request = mInFlightMap.valueAt(idx);
    const uint32_t frameNumber = mInFlightMap.keyAt(idx);

    nsecs_t sensorTimestamp = request.sensorTimestamp;
    nsecs_t shutterTimestamp = request.shutterTimestamp;

    if (request.numBuffersLeft == 0 &&
            (request.skipResultMetadata ||
            (request.haveResultMetadata && shutterTimestamp != 0))) {
        // Timestamp 0: any still-pending buffers are flushed out without a
        // shutter timestamp at this point.
        returnOutputBuffers(request.pendingOutputBuffers.array(),
            request.pendingOutputBuffers.size(), 0);
        // NOTE(review): the actual mInFlightMap.removeItemsAt(idx) appears
        // to be elided from this excerpt - confirm against the full source.
     }

    // Diagnostics only: flag an abnormally deep in-flight queue.
    if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
        CLOGE("In-flight list too large: %zu", mInFlightMap.size());
    } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
            kInFlightWarnLimitHighSpeed) {
        CLOGE("In-flight list too large for high speed configuration: %zu",
                mInFlightMap.size());
    }
}