The Long Road of Android Development Ⅷ: Android Binder (Perhaps the Easiest to Understand) (6)

Let's continue into [ProcessState.cpp]:

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                // The handle == 0 case is reserved for the ServiceManager.
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                    return NULL;
            }

            // Our handle is not 0; the BpBinder is created right here.
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}
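For context, here is a minimal sketch of how native code usually reaches this function. It assumes an AOSP native build environment (the main() wrapper is purely illustrative); getContextObject(NULL) is the standard entry point and simply forwards handle 0 to getStrongProxyForHandle():

#include <binder/IBinder.h>
#include <binder/ProcessState.h>

using namespace android;

int main()
{
    // getContextObject() internally calls getStrongProxyForHandle(0),
    // so 'sm' wraps a BpBinder whose mHandle is 0: the ServiceManager.
    sp<IBinder> sm = ProcessState::self()->getContextObject(NULL);

    // A non-zero handle handed back by the driver (e.g. for AMS) would
    // instead take the "new BpBinder(handle)" branch shown above.
    return sm != NULL ? 0 : 1;
}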

Good, that leaves the last function: javaObjectForIBinder.

[android_util_Binder.cpp]

jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
    if (val == NULL) return NULL;

    // If val is a local Java Binder (a JavaBBinder), take this branch.
    // In our case val is a BpBinder, so we fall through.
    if (val->checkSubclass(&gBinderOffsets)) {
        // One of our own!
        jobject object = static_cast<JavaBBinder*>(val.get())->object();
        LOGDEATH("objectForBinder %p: it's our own %p!\n", val.get(), object);
        return object;
    }

    .........

    // Call BpBinder's findObject().
    // The native BpBinder owns an ObjectManager that tracks the Java
    // BinderProxy objects created on top of it. findObject() checks whether
    // a BinderProxy keyed by gBinderProxyOffsets is already registered
    // with that ObjectManager.
    jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
    if (object != NULL) {
        jobject res = jniGetReferent(env, object);
        ............
        // A Java BinderProxy was already managed here but is stale,
        // so detach and delete the old one.
        android_atomic_dec(&gNumProxyRefs);
        val->detachObject(&gBinderProxyOffsets);
        env->DeleteGlobalRef(object);
    }

    // Create a new BinderProxy object.
    object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
    if (object != NULL) {
        // The proxy holds a reference to the native object.
        env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
        val->incStrong((void*)javaObjectForIBinder);

        // The native object needs to hold a weak reference back to the
        // proxy, so we can retrieve the same proxy if it is still active.
        jobject refObject = env->NewGlobalRef(
                env->GetObjectField(object, gBinderProxyOffsets.mSelf));
        // Register the newly created BinderProxy with the BpBinder's
        // ObjectManager, along with the cleanup callback proxy_cleanup.
        // When the BinderProxy is detached, proxy_cleanup runs to release
        // the associated resources.
        val->attachObject(&gBinderProxyOffsets, refObject,
                jnienv_to_javavm(env), proxy_cleanup);

        // Also remember the death recipients registered on this proxy.
        sp<DeathRecipientList> drl = new DeathRecipientList;
        drl->incStrong((void*)javaObjectForIBinder);
        // Tie the death-notification list to the BinderProxy.
        env->SetLongField(object, gBinderProxyOffsets.mOrgue,
                reinterpret_cast<jlong>(drl.get()));

        // Note that a new object reference has been created.
        android_atomic_inc(&gNumProxyRefs);
        // GC bookkeeping: gNumRefsCreated counts the BinderProxy objects
        // created so far. Once more than 200 have been created, this call
        // triggers a garbage collection via BinderInternal's forceGc().
        incRefsCreated(env);
    }

    return object;
}
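To make the findObject/attachObject/detachObject calls above more concrete, here is a minimal sketch of the ObjectManager idea inside BpBinder. This is an illustrative simplification, not the AOSP implementation: a map from a key (in practice, the address of the gBinderProxyOffsets struct) to the attached object plus a cleanup callback.

#include <cstddef>
#include <map>

// Hypothetical, simplified stand-in for BpBinder's ObjectManager.
typedef void (*object_cleanup_func)(const void* id, void* obj, void* cleanupCookie);

class ObjectManagerSketch {
public:
    // attachObject(): remember obj under key 'id' with a cleanup callback.
    void attach(const void* id, void* obj, void* cleanupCookie,
                object_cleanup_func func) {
        entry_t e;
        e.object = obj;
        e.cleanupCookie = cleanupCookie;
        e.func = func;
        mObjects[id] = e;
    }

    // findObject(): return the object attached under 'id', or NULL.
    void* find(const void* id) const {
        std::map<const void*, entry_t>::const_iterator it = mObjects.find(id);
        return (it == mObjects.end()) ? NULL : it->second.object;
    }

    // detachObject(): drop the entry; in the real code the cleanup
    // callback (proxy_cleanup above) releases the associated resources.
    void detach(const void* id) { mObjects.erase(id); }

private:
    struct entry_t {
        void* object;
        void* cleanupCookie;
        object_cleanup_func func;
    };
    std::map<const void*, entry_t> mObjects;
};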

With that, the whole path is finally connected. The overall flow is shown below.

[Figure: overall flow of the Binder call path]

Driver-Layer Binder

Two questions from earlier still remain:

How does the AMS proxy communicate with Binder?

Through the Java-layer service proxy, the call eventually lands in BpBinder::transact. (On the Java side the chain is BinderProxy.transact() -> transactNative(), whose JNI implementation in android_util_Binder.cpp forwards to the native BpBinder.)

[BpBinder.cpp]

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        // BpBinder hands the actual transact work over to IPCThreadState.
        // Note that mHandle is passed along as a parameter too.
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
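As a quick illustration, the sketch below (again assuming an AOSP native environment; the function name pingContextObject is hypothetical) drives this exact path by sending a PING_TRANSACTION through a proxy. The call runs through mAlive and IPCThreadState::transact() just as shown above:

#include <binder/IBinder.h>
#include <binder/Parcel.h>
#include <binder/ProcessState.h>

using namespace android;

void pingContextObject()
{
    // Handle 0: the ServiceManager proxy from getStrongProxyForHandle(0).
    sp<IBinder> sm = ProcessState::self()->getContextObject(NULL);

    Parcel data, reply;
    // PING_TRANSACTION carries no payload; it simply round-trips through
    // BpBinder::transact() and the binder driver.
    status_t err = sm->transact(IBinder::PING_TRANSACTION, data, &reply);

    // A DEAD_OBJECT result would flip mAlive to 0 inside BpBinder,
    // making every later transact() fail fast.
    (void)err;
}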

[IPCThreadState.cpp]

IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mMyThreadId(gettid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    // mIn and mOut are two Parcels. Think of them as the receive and
    // send buffers for commands exchanged with the binder driver.
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
}

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ......
    /*
     Note the first argument, BC_TRANSACTION: it is the command code the
     application sends to the binder device, while the codes the binder
     device uses to reply to the application start with BR_. The command
     codes are defined in binder_module.h. Mapping request codes to their
     reply codes requires reading the binder driver's implementation,
     which we don't need here yet.
    */
    err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    ......
    err = waitForResponse(NULL, NULL);
    ......
    return err;
}

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    // binder_transaction_data is the data structure used to communicate
    // with the binder device.
    binder_transaction_data tr;

    // Sure enough, the handle is copied into target to identify the
    // destination; 0 denotes the ServiceManager.
    tr.target.handle = handle;
    // code is the command code, used for the switch/case on the other side!
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    // The command is written into mOut rather than sent out directly,
    // so the function's name is a bit misleading.
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err = talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        // See? Here mIn is being read, which means talkWithDriver() sent
        // mOut out and then filled mIn with data read back from the driver.
        cmd = mIn.readInt32();
        ......
    }
    ......
}

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    // The details in between are involved, but they boil down to filling
    // bwr from mOut's data and mIn's receive buffer.
    status_t err;
    do {
        // Read and write via ioctl.
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR);

    // At this point the reply data is in bwr; the buffer bwr used to
    // receive it is the one supplied by mIn.
    if (bwr.read_consumed > 0) {
        mIn.setDataSize(bwr.read_consumed);
        mIn.setDataPosition(0);
    }
    return NO_ERROR;
}
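To tie mOut/mIn to the ioctl call, here is a condensed sketch of the buffer setup that talkWithDriver() performs. It is a simplification under stated assumptions (the helper name talkWithDriverSketch is hypothetical; error handling, the doReceive flag, and partial-write bookkeeping are omitted), while the binder_write_read fields are the real ones from the kernel's binder header:

#include <errno.h>
#include <sys/ioctl.h>
#include <binder/Parcel.h>
#include <linux/android/binder.h>   // binder_write_read, BINDER_WRITE_READ

using namespace android;

// Hypothetical helper mirroring the core of talkWithDriver().
int talkWithDriverSketch(int driverFD, Parcel& out, Parcel& in)
{
    binder_write_read bwr;

    // Everything queued in mOut (BC_TRANSACTION plus its
    // binder_transaction_data) becomes the driver's write buffer.
    bwr.write_size     = out.dataSize();
    bwr.write_buffer   = (uintptr_t)out.data();
    bwr.write_consumed = 0;

    // mIn's backing storage becomes the driver's read buffer.
    bwr.read_size     = in.dataCapacity();
    bwr.read_buffer   = (uintptr_t)in.data();
    bwr.read_consumed = 0;

    // A single ioctl both sends the commands and blocks for the reply.
    int err;
    do {
        err = (ioctl(driverFD, BINDER_WRITE_READ, &bwr) >= 0) ? 0 : -errno;
    } while (err == -EINTR);

    if (err == 0 && bwr.read_consumed > 0) {
        // The reply (e.g. BR_REPLY) now sits in mIn, ready for
        // waitForResponse() to parse.
        in.setDataSize(bwr.read_consumed);
        in.setDataPosition(0);
    }
    return err;
}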

Summary

In this article we analyzed the Binder mechanism in detail, working from the overview through the Java-layer Binder and the Native-layer Binder down to the Binder driver, so readers focused on any of these layers should find something useful.

Preview of the Next Article
