
Android Network Management Source Code Analysis -- netd

http://www.voidcn.com/blog/a34140974/article/p-5033426.html

1  Netd Overview

Netd is Android's network daemon. It acts as the system's network butler: it wraps the various kinds of complex low-level networking (NAT, PPP, SoftAP, tethering, Ethernet, mDNS, and the like), hides the differences between the underlying network interfaces, and presents the Framework with a unified calling interface, which greatly simplifies using the network. Netd has two main jobs: first, to receive network requests from the Framework, process them, and report the results back to the Framework; second, to listen for network events (disconnect/connect/error and so on) and report them up to the Framework layer.
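To make the first job concrete: the Framework and netd speak a simple line-oriented text protocol over the "netd" local socket, with a sequence number on each request and a numeric code on each reply. A sketch of one exchange (command names and code values vary across Android versions; the lines below are illustrative):

0 interface list                 <- Framework to netd: sequence number, then the command
110 0 lo                         <- netd to Framework: one result line per interface
110 0 wlan0
200 0 Interface list completed   <- a 2xx completion code ends the transaction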

2  The Netd Startup Process

Netd runs as a background daemon and is started by init early in Android system boot. Its entry in init.rc is as follows:

service netd /system/bin/netd
    class main
    socket netd stream 0660 root system
    socket dnsproxyd stream 0660 root inet
    socket mdns stream 0660 root system
    socket fwmarkd stream 0660 root inet

As can be seen, four sockets are configured for netd here (one more than in older versions: the one named "fwmarkd"). From this configuration we can locate netd's entry function, main():

int main() {
    CommandListener *cl;
    NetlinkManager *nm;
    DnsProxyListener *dpl;
    MDnsSdListener *mdnsl;
    FwmarkServer* fwmarkServer;

    ALOGI("Netd 1.0 starting");

    remove_pid_file();  // presumably clears a stale pid file from a previous run
    blockSigpipe();     // block SIGPIPE so a broken socket doesn't kill the daemon

    // Create the NetlinkManager instance
    if (!(nm = NetlinkManager::Instance())) {
        ALOGE("Unable to create NetlinkManager");
        exit(1);
    };

    // Create the CommandListener instance, set it as the NetlinkManager's
    // broadcaster, then start nm
    cl = new CommandListener();
    nm->setBroadcaster((SocketListener *) cl);

    if (nm->start()) {
        ALOGE("Unable to start NetlinkManager (%s)", strerror(errno));
        exit(1);
    }

    // Set local DNS mode, to prevent bionic from proxying
    // back to this service, recursively.
    setenv("ANDROID_DNS_MODE", "local", 1);

    // Create and start listening on the "dnsproxyd" socket
    dpl = new DnsProxyListener(CommandListener::sNetCtrl);
    if (dpl->startListener()) {
        ALOGE("Unable to start DnsProxyListener (%s)", strerror(errno));
        exit(1);
    }

    // Create and start listening on the "mdns" socket
    mdnsl = new MDnsSdListener();
    if (mdnsl->startListener()) {
        ALOGE("Unable to start MDnsSdListener (%s)", strerror(errno));
        exit(1);
    }

    // Create and start listening on the "fwmarkd" socket
    fwmarkServer = new FwmarkServer(CommandListener::sNetCtrl);
    if (fwmarkServer->startListener()) {
        ALOGE("Unable to start FwmarkServer (%s)", strerror(errno));
        exit(1);
    }

    // Start listening on the "netd" socket
    if (cl->startListener()) {
        ALOGE("Unable to start CommandListener (%s)", strerror(errno));
        exit(1);
    }

    bool wrote_pid = write_pid_file();

    while(1) {
        sleep(30); // 30 sec
        if (!wrote_pid) {
            wrote_pid = write_pid_file();
        }
    }

    ALOGI("Netd exiting");
    remove_pid_file();
    exit(0);
}

As the code above shows, netd's startup is not complicated: it mainly brings up four listening sockets, and the analysis below will show that each socket is served by its own listener thread. Let's start with NetlinkManager (NM for short), which is responsible for receiving and parsing uevent messages from the kernel. If you know Linux sockets well, the name "NetlinkManager" alone gives away the basic implementation: it must use PF_NETLINK sockets, the kind user space typically uses to listen for kernel events such as USB plug/unplug. From main() we know its entry point is start():

int NetlinkManager::start() {
    // Create the socket that receives NETLINK_KOBJECT_UEVENT messages; the
    // fd is kept in mUeventSock. NETLINK_FORMAT_ASCII means the uevent
    // payload is an ASCII string.
    if ((mUeventHandler = setupSocket(&mUeventSock, NETLINK_KOBJECT_UEVENT,
         0xffffffff, NetlinkListener::NETLINK_FORMAT_ASCII, false)) == NULL) {
        return -1;
    }

    // Create the socket that receives RTMGRP_LINK (and related routing)
    // messages; the fd is kept in mRouteSock. NETLINK_FORMAT_BINARY means
    // the message is a struct that must be parsed as binary data.
    if ((mRouteHandler = setupSocket(&mRouteSock, NETLINK_ROUTE,
                                     RTMGRP_LINK |
                                     RTMGRP_IPV4_IFADDR |
                                     RTMGRP_IPV6_IFADDR |
                                     RTMGRP_IPV6_ROUTE |
                                     (1 << (RTNLGRP_ND_USEROPT - 1)),
         NetlinkListener::NETLINK_FORMAT_BINARY, false)) == NULL) {
        return -1;
    }

    // Create the socket that receives NETLINK_NFLOG messages; the fd is
    // kept in mQuotaSock
    if ((mQuotaHandler = setupSocket(&mQuotaSock, NETLINK_NFLOG,
            NFLOG_QUOTA_GROUP, NetlinkListener::NETLINK_FORMAT_BINARY, false)) == NULL) {
        ALOGE("Unable to open quota socket");
    }

    // Create the socket that receives NETLINK_NETFILTER messages; the fd is
    // kept in mStrictSock
    if ((mStrictHandler = setupSocket(&mStrictSock, NETLINK_NETFILTER,
            0, NetlinkListener::NETLINK_FORMAT_BINARY_UNICAST, true)) == NULL) {
        ALOGE("Unable to open strict socket");
    }

    return 0;
}

start() calls setupSocket() four times, creating four PF_NETLINK sockets that each listen for a different class of kernel events. Now look at setupSocket():

NetlinkHandler *NetlinkManager::setupSocket(int *sock, int netlinkFamily,
    int groups, int format, bool configNflog) {
    struct sockaddr_nl nladdr;
    int sz = 64 * 1024;
    int on = 1;

    memset(&nladdr, 0, sizeof(nladdr));
    nladdr.nl_family = AF_NETLINK;
    nladdr.nl_pid = getpid();
    nladdr.nl_groups = groups;

    // Create the socket. Note the type is SOCK_DGRAM; this line is the heart
    // of the whole NM. netlinkFamily selects which kernel events to listen for.
    if ((*sock = socket(PF_NETLINK, SOCK_DGRAM | SOCK_CLOEXEC, netlinkFamily)) < 0) {
        ALOGE("Unable to create netlink socket: %s", strerror(errno));
        return NULL;
    }

    // Set the socket options
    if (setsockopt(*sock, SOL_SOCKET, SO_RCVBUFFORCE, &sz, sizeof(sz)) < 0) {
        ALOGE("Unable to set uevent socket SO_RCVBUFFORCE option: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    if (setsockopt(*sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on)) < 0) {
        SLOGE("Unable to set uevent socket SO_PASSCRED option: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    // Bind
    if (bind(*sock, (struct sockaddr *) &nladdr, sizeof(nladdr)) < 0) {
        ALOGE("Unable to bind netlink socket: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    if (configNflog) {  // true only for mStrictSock
        if (android_nflog_send_config_cmd(*sock, 0, NFULNL_CFG_CMD_PF_UNBIND, AF_INET) < 0) {
            ALOGE("Failed NFULNL_CFG_CMD_PF_UNBIND: %s", strerror(errno));
            return NULL;
        }
        if (android_nflog_send_config_cmd(*sock, 0, NFULNL_CFG_CMD_PF_BIND, AF_INET) < 0) {
            ALOGE("Failed NFULNL_CFG_CMD_PF_BIND: %s", strerror(errno));
            return NULL;
        }
        if (android_nflog_send_config_cmd(*sock, 0, NFULNL_CFG_CMD_BIND, AF_UNSPEC) < 0) {
            ALOGE("Failed NFULNL_CFG_CMD_BIND: %s", strerror(errno));
            return NULL;
        }
    }

    // Wrap the socket in a NetlinkHandler so activity on it gets handled
    NetlinkHandler *handler = new NetlinkHandler(this, *sock, format);
    if (handler->start()) {  // starting the NetlinkHandler really means starting to listen
        ALOGE("Unable to start NetlinkHandler: %s", strerror(errno));
        close(*sock);
        return NULL;
    }

    return handler;
}
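setupSocket() is, at heart, the standard user-space recipe for subscribing to kernel uevents. Stripped of the NM plumbing, a self-contained version of the same recipe looks roughly like this (my own sketch, not AOSP code; it typically needs root to bind):

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
    struct sockaddr_nl addr;
    memset(&addr, 0, sizeof(addr));
    addr.nl_family = AF_NETLINK;
    addr.nl_pid = getpid();
    addr.nl_groups = 0xffffffff;   // subscribe to all uevent multicast groups

    int sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
    if (sock < 0 || bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
        return 1;

    char buf[4096];
    for (;;) {
        ssize_t n = recv(sock, buf, sizeof(buf) - 1, 0);
        if (n <= 0) break;
        buf[n] = '\0';
        // A uevent is "ACTION@DEVPATH" followed by '\0'-separated KEY=VALUE
        // pairs; printing buf shows just the header segment
        printf("uevent: %s\n", buf);
    }
    close(sock);
    return 0;
}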

NetlinkHandler::start() simply forwards to this->startListener(), a method inherited from SocketListener. SocketListener is a fairly general-purpose class; many modules that do socket I/O multiplexing go through it.

int SocketListener::startListener(int backlog) {
    // Note these are member variables; the point of this block is simply to
    // end up with a usable socket in mSock
    if (!mSocketName && mSock == -1) {
        SLOGE("Failed to start unbound listener");
        errno = EINVAL;
        return -1;
    } else if (mSocketName) {
        if ((mSock = android_get_control_socket(mSocketName)) < 0) {
            SLOGE("Obtaining file descriptor socket '%s' failed: %s",
                 mSocketName, strerror(errno));
            return -1;
        }
        SLOGV("got mSock = %d for %s", mSock, mSocketName);
        fcntl(mSock, F_SETFD, FD_CLOEXEC);
    }

    // If mListen is set, listen on the socket; otherwise wrap it in a
    // SocketClient and add it to the client collection.
    // Note the short-circuit: for NetlinkHandler, the constructor shows that
    // mListen is false
    if (mListen && listen(mSock, backlog) < 0) {
        SLOGE("Unable to listen on socket (%s)", strerror(errno));
        return -1;
    } else if (!mListen)
        mClients->push_back(new SocketClient(mSock, false, mUseCmdNum));  // here

    if (pipe(mCtrlPipe)) {
        SLOGE("pipe failed (%s)", strerror(errno));
        return -1;
    }

    // Spawn the thread that services the socket; for NETLINK sockets there is
    // no real "listening socket" to speak of
    if (pthread_create(&mThread, NULL, SocketListener::threadStart, this)) {
        SLOGE("pthread_create (%s)", strerror(errno));
        return -1;
    }

    return 0;
}
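One detail in the mSocketName branch is worth pausing on: android_get_control_socket() is where the init.rc socket lines pay off. Before exec()ing netd, init creates every socket listed in the service entry and publishes each fd in an environment variable named after the socket; the helper only reads it back. A simplified paraphrase (the real implementation lives in libcutils and performs more validation):

#include <stdio.h>
#include <stdlib.h>

// Paraphrase of android_get_control_socket(); e.g. get_control_socket("netd")
// reads ANDROID_SOCKET_netd, which init set before exec()
static int get_control_socket(const char *name) {
    char key[64];
    snprintf(key, sizeof(key), "ANDROID_SOCKET_%s", name);
    const char *val = getenv(key);
    if (val == NULL)
        return -1;
    return atoi(val);   // the inherited file descriptor number
}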

Enter the thread entry function, SocketListener::threadStart():

void *SocketListener::threadStart(void *obj) {
    // obj is the argument passed in by the creating thread:
    // the SocketListener itself
    SocketListener *me = reinterpret_cast<SocketListener *>(obj);
    me->runListener();
    pthread_exit(NULL);
    return NULL;
}

And then into runListener():

void SocketListener::runListener() {
    // The core of this function is a select() loop
    SocketClientCollection pendingList;  // holds the clients whose fds are active

    while(1) {
        SocketClientCollection::iterator it;
        fd_set read_fds;
        int rc = 0;
        int max = -1;

        FD_ZERO(&read_fds);
        if (mListen) {  // watch the listen socket for reads; as noted, mListen is false here
            max = mSock;
            FD_SET(mSock, &read_fds);
        }
        FD_SET(mCtrlPipe[0], &read_fds);  // what is this pipe for? a loop-exit flag?
        if (mCtrlPipe[0] > max)
            max = mCtrlPipe[0];

        pthread_mutex_lock(&mClientsLock);
        // Walk the mClients collection
        for (it = mClients->begin(); it != mClients->end(); ++it) {
            // NB: calling out to an other object with mClientsLock held (safe)
            int fd = (*it)->getSocket();  // the socket used to talk to this client
            FD_SET(fd, &read_fds);        // watch it
            if (fd > max) {
                max = fd;
            }
        }
        pthread_mutex_unlock(&mClientsLock);
        SLOGV("mListen=%d, max=%d, mSocketName=%s", mListen, max, mSocketName);

        if ((rc = select(max + 1, &read_fds, NULL, NULL, NULL)) < 0) {  // select
            if (errno == EINTR)
                continue;
            SLOGE("select failed (%s) mListen=%d, max=%d", strerror(errno), mListen, max);
            sleep(1);
            continue;
        } else if (!rc)
            continue;

        if (FD_ISSET(mCtrlPipe[0], &read_fds)) {  // the pipe has activity
            char c = CtrlPipe_Shutdown;
            TEMP_FAILURE_RETRY(read(mCtrlPipe[0], &c, 1));  // read from the pipe
            if (c == CtrlPipe_Shutdown) {
                break;  // so this is what the pipe is watched for
            }
            continue;
        }

        // If the listening socket is readable, accept the connection;
        // NETLINK sockets never take this path
        if (mListen && FD_ISSET(mSock, &read_fds)) {
            struct sockaddr addr;
            socklen_t alen;
            int c;

            do {
                alen = sizeof(addr);
                c = accept(mSock, &addr, &alen);
                SLOGV("%s got %d from accept", mSocketName, c);
            } while (c < 0 && errno == EINTR);
            if (c < 0) {
                SLOGE("accept failed (%s)", strerror(errno));
                sleep(1);
                continue;
            }
            fcntl(c, F_SETFD, FD_CLOEXEC);
            pthread_mutex_lock(&mClientsLock);
            // Add it to the client collection
            mClients->push_back(new SocketClient(c, true, mUseCmdNum));
            pthread_mutex_unlock(&mClientsLock);
        }

        // Collect every active fd into pendingList (often there is just one)
        pendingList.clear();
        pthread_mutex_lock(&mClientsLock);
        for (it = mClients->begin(); it != mClients->end(); ++it) {
            SocketClient* c = *it;
            // NB: calling out to an other object with mClientsLock held (safe)
            int fd = c->getSocket();
            if (FD_ISSET(fd, &read_fds)) {   // this fd is active
                pendingList.push_back(c);    // queue it
                c->incRef();
            }
        }
        pthread_mutex_unlock(&mClientsLock);

        // Drain pendingList: the kernel produced events the upper layer must handle
        while (!pendingList.empty()) {
            it = pendingList.begin();
            SocketClient* c = *it;
            pendingList.erase(it);
            if (!onDataAvailable(c)) {
                release(c, false);
            }
            c->decRef();
        }
    }
}

The function above watches three kinds of fds: the listening socket; client sockets, each wrapped in a SocketClient and kept in a collection; and the control pipe. From NetlinkManager::start() we already know that four of these structures were set up, for mUeventSock, mRouteSock, mQuotaSock, and mStrictSock. All of them are PF_NETLINK sockets, not listening sockets; concretely, their mListen is false. So each of the four was added to its listener's mClients as a SocketClient (note there are four separate listener instances). But wait, where is the listening socket? There simply isn't one: these are SOCK_DGRAM sockets!
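This also answers the question the comments above keep circling back to: the pipe is SocketListener's shutdown channel. select() would otherwise block forever, so stopListener() writes a CtrlPipe_Shutdown byte into the write end, which wakes select() and lets the thread break out of its loop. The underlying "self-pipe trick" in a generic, self-contained sketch (the names are mine, not AOSP's):

#include <sys/select.h>
#include <unistd.h>

static int ctrl_pipe[2];   // created once with pipe(ctrl_pipe) before the thread starts

void *listener_loop(void *arg) {
    (void)arg;
    for (;;) {
        fd_set rfds;
        FD_ZERO(&rfds);
        FD_SET(ctrl_pipe[0], &rfds);
        // ... FD_SET() the real client sockets here and track the max fd ...
        if (select(ctrl_pipe[0] + 1, &rfds, NULL, NULL, NULL) < 0)
            continue;
        if (FD_ISSET(ctrl_pipe[0], &rfds)) {
            char c;
            read(ctrl_pipe[0], &c, 1);
            break;   // another thread asked us to shut down
        }
        // ... otherwise service whichever client fd became readable ...
    }
    return NULL;
}

void stop_listener(void) {
    char c = 0;                   // plays the role of CtrlPipe_Shutdown
    write(ctrl_pipe[1], &c, 1);   // wakes the select() above
}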


When a read event is detected on one of these sockets, meaning the kernel has produced an event the upper layers care about, the virtual function onDataAvailable() is called. The concrete object here is a NetlinkHandler, so what runs is the onDataAvailable() it inherits from NetlinkListener:

bool NetlinkListener::onDataAvailable(SocketClient *cli)
{
    int socket = cli->getSocket();
    ssize_t count;
    uid_t uid = -1;

    bool require_group = true;
    if (mFormat == NETLINK_FORMAT_BINARY_UNICAST) {
        require_group = false;
    }

    // Read the data
    count = TEMP_FAILURE_RETRY(uevent_kernel_recv(socket,
            mBuffer, sizeof(mBuffer), require_group, &uid));
    if (count < 0) {
        if (uid > 0)
            LOG_EVENT_INT(65537, uid);
        SLOGE("recvmsg failed (%s)", strerror(errno));
        return false;
    }

    NetlinkEvent *evt = new NetlinkEvent();  // create a NetlinkEvent
    if (evt->decode(mBuffer, count, mFormat)) {  // decode the raw message
        onEvent(evt);  // this is where the event actually gets handled
    } else if (mFormat != NETLINK_FORMAT_BINARY) {
        // Don't complain if parseBinaryNetlinkMessage returns false. That can
        // just mean that the buffer contained no messages we're interested in.
        SLOGE("Error decoding NetlinkEvent");
    }

    delete evt;
    return true;
}
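To make decode() concrete: on a NETLINK_FORMAT_ASCII socket, a uevent arrives as an "ACTION@DEVPATH" header followed by KEY=VALUE pairs, each terminated by '\0'. A hypothetical interface-add event might carry (illustrative values):

add@/devices/virtual/net/wlan0
ACTION=add
SUBSYSTEM=net
INTERFACE=wlan0
IFINDEX=5

decode() splits these pairs into named parameters, which is why onEvent() below can query them with findParam("INTERFACE") and friends.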

The onEvent() invoked in there is the real entry point into NetlinkHandler:

void NetlinkHandler::onEvent(NetlinkEvent *evt) {
    const char *subsys = evt->getSubsystem();
    if (!subsys) {
        ALOGW("No subsystem found in netlink event");
        return;
    }

    if (!strcmp(subsys, "net")) {
        NetlinkEvent::Action action = evt->getAction();
        const char *iface = evt->findParam("INTERFACE");

        if (action == NetlinkEvent::Action::kAdd) {
            notifyInterfaceAdded(iface);
        } else if (action == NetlinkEvent::Action::kRemove) {
            notifyInterfaceRemoved(iface);
        } else if (action == NetlinkEvent::Action::kChange) {
            evt->dump();
            notifyInterfaceChanged("nana", true);
        } else if (action == NetlinkEvent::Action::kLinkUp) {
            notifyInterfaceLinkChanged(iface, true);
        } else if (action == NetlinkEvent::Action::kLinkDown) {
            notifyInterfaceLinkChanged(iface, false);
        } else if (action == NetlinkEvent::Action::kAddressUpdated ||
                   action == NetlinkEvent::Action::kAddressRemoved) {
            const char *address = evt->findParam("ADDRESS");
            const char *flags = evt->findParam("FLAGS");
            const char *scope = evt->findParam("SCOPE");
            if (action == NetlinkEvent::Action::kAddressRemoved && iface && address) {
                int resetMask = strchr(address, ':') ? RESET_IPV6_ADDRESSES : RESET_IPV4_ADDRESSES;
                resetMask |= RESET_IGNORE_INTERFACE_ADDRESS;
                if (int ret = ifc_reset_connections(iface, resetMask)) {
                    ALOGE("ifc_reset_connections failed on iface %s for address %s (%s)", iface,
                          address, strerror(ret));
                }
            }
            if (iface && flags && scope) {
                notifyAddressChanged(action, address, iface, flags, scope);
            }
        } else if (action == NetlinkEvent::Action::kRdnss) {
            const char *lifetime = evt->findParam("LIFETIME");
            const char *servers = evt->findParam("SERVERS");
            if (lifetime && servers) {
                notifyInterfaceDnsServers(iface, lifetime, servers);
            }
        } else if (action == NetlinkEvent::Action::kRouteUpdated ||
                   action == NetlinkEvent::Action::kRouteRemoved) {
            const char *route = evt->findParam("ROUTE");
            const char *gateway = evt->findParam("GATEWAY");
            const char *iface = evt->findParam("INTERFACE");
            if (route && (gateway || iface)) {
                notifyRouteChange(action, route, gateway, iface);
            }
        }
    } else if (!strcmp(subsys, "qlog")) {
        const char *alertName = evt->findParam("ALERT_NAME");
        const char *iface = evt->findParam("INTERFACE");
        notifyQuotaLimitReached(alertName, iface);
    } else if (!strcmp(subsys, "strict")) {
        const char *uid = evt->findParam("UID");
        const char *hex = evt->findParam("HEX");
        notifyStrictCleartext(uid, hex);
    } else if (!strcmp(subsys, "xt_idletimer")) {
        const char *label = evt->findParam("INTERFACE");
        const char *state = evt->findParam("STATE");
        const char *timestamp = evt->findParam("TIME_NS");
        const char *uid = evt->findParam("UID");
        if (state)
            notifyInterfaceClassActivity(label, !strcmp("active", state),
                                         timestamp, uid);
#if !LOG_NDEBUG
    } else if (strcmp(subsys, "platform") && strcmp(subsys, "backlight")) {
        ALOGV("unexpected event from subsystem %s", subsys);
#endif
    }
}

As you can see, each kind of event is dispatched to a corresponding notifyXxx() function, and all of the notifyXxx() functions ultimately call notify():

void NetlinkHandler::notify(int code, const char *format, ...) {
    char *msg;
    va_list args;
    va_start(args, format);
    if (vasprintf(&msg, format, args) >= 0) {
        // Note: the socket used here is cl's "netd" socket,
        // not one of the four netlink sockets
        mNm->getBroadcaster()->sendBroadcast(code, msg, false);
        free(msg);
    } else {
        SLOGE("Failed to send notification: vasprintf: %s", strerror(errno));
    }
    va_end(args);
}

mNm is the NetlinkManager created earlier in main(); its broadcaster was set to cl, the CommandListener instance. Through the "netd" socket, CommandListener sends messages up to NetworkManagementService. Two kinds of messages travel this way: events the native layer reports on its own initiative, and responses to requests from above. (This is very similar to how rild works.)
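Concretely, every kernel event ends up as a single line of text on the "netd" socket. When wlan0 appears, for example, notifyInterfaceAdded() produces roughly the following line, where 600 is the unsolicited InterfaceChange response code in this era of AOSP (exact code values and wording vary by version):

600 Iface added wlan0

NetworkManagementService on the Java side parses the leading code to decide which callback to invoke.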

Let's pull the steps together: NetlinkManager creates four PF_NETLINK sockets to watch for kernel uevents. When a uevent occurs, the corresponding listener picks it up, NetlinkManager turns the uevent into a NetlinkEvent, and the event is broadcast upward through CommandListener, where "upward" means the Java layer. So the native C/C++ layer and the Java layer are tied together by sockets.

Two things must be kept straight here. First, the four netlink sockets are not the broadcaster's socket. Second, the name "Broadcaster" is, in my view, misleading: it suggests broadcasting, and a broadcast socket would have to be UDP. In fact this socket is the client socket accept()ed from the "netd" socket configured in init.rc, a stream (TCP-style) socket, and stream sockets cannot broadcast. Read sendBroadcast() simply as sendMsg() and everything that follows becomes easy to understand.
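In that spirit, the essence of sendBroadcast() fits in a few lines: walk every accepted client socket and write the same "code message" line to each; nothing UDP-like is involved. A paraphrase of the idea, not the verbatim SocketListener source:

#include <cstdio>
#include <unistd.h>
#include <vector>

// "Broadcast" here just means one ordinary write per connected client
static void send_broadcast(const std::vector<int> &clients, int code, const char *msg) {
    char line[512];
    int n = snprintf(line, sizeof(line), "%d %s\n", code, msg);
    for (int fd : clients) {
        write(fd, line, (size_t)n);
    }
}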

Next, CommandListener. This class also inherits from SocketListener, but unlike the four netlink sockets its mListen is true; that is, "netd" really is a listening socket. In main(), CommandListener called startListener() to start accepting connections from the Java layer. When the upper layer connects, select() returns, accept() produces a client socket, which is wrapped in a SocketClient, added to the list, and put into select()'s watch set. When the Java layer sends a command, the SocketClient's read event fires, the command is processed, and the result is written back as a response; events reported from below travel up through this same client socket. Anyone familiar with network programming will recognize this as a classic select()-based I/O-multiplexing server.

Besides "netd", the other three sockets configured in init.rc (dnsproxyd, mdns, fwmarkd) are served by almost identical server structures, so they will not be repeated here. A rough block diagram of netd follows:

[Figure: rough block diagram of netd (image not reproduced in this copy)]
