Android -- Audio System: A Brief Analysis of the AudioTrack Internals (Part 3)

In the previous article we walked through a simple example of using AudioTrack and took a first look at the Java implementation behind each step of the AudioTrack API. Following on from that, and matching the steps of the Demo, we analyzed the Java AudioTrack implementation and arrived at six important native calls:

  • native_get_min_buff_size(): query the minimum required buffer size
  • native_setup(): create the AudioTrack
  • native_start(): start playback
  • native_write_byte()/native_write_short()...: write audio data to the AudioTrack
  • native_stop(): stop playback
  • native_release(): release the system resources bound to the AudioTrack

These calls hand the work off from the Java layer to the JNI layer, implemented in android_media_AudioTrack.cpp; next we examine how each of them is handled there.
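As a reminder of where these native calls come from, here is a minimal playback sequence against the public Java API (a sketch, not the exact Demo code from the previous part); each call is annotated with the native method it eventually reaches:

int minBufSize = AudioTrack.getMinBufferSize(             // -> native_get_min_buff_size()
        44100, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);

AudioTrack track = new AudioTrack(                        // -> native_setup()
        AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
        minBufSize, AudioTrack.MODE_STREAM);

byte[] pcmData = new byte[minBufSize];                    // silence; stands in for real PCM data

track.play();                                             // -> native_start()
track.write(pcmData, 0, pcmData.length);                  // -> native_write_byte()
track.stop();                                             // -> native_stop()
track.release();                                          // -> native_release()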

1. android_media_AudioTrack_get_min_buff_size()

When we query the minimum required buffer size, native_get_min_buff_size() is invoked; its JNI implementation is the following function:

// ----------------------------------------------------------------------------
// returns the minimum required size for the successful creation of a streaming AudioTrack
// returns -1 if there was an error querying the hardware.
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env,  jobject thiz,
    jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    size_t frameCount;
    const status_t status = AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
            sampleRateInHertz); // compute the minimum frame count from the given parameters
    if (status != NO_ERROR) {
        ALOGE("AudioTrack::getMinFrameCount() for sample rate %d failed with status %d",
                sampleRateInHertz, status);
        return -1;
    }
    const audio_format_t format = audioFormatToNative(audioFormat); // convert the Java format constant to the native type
    if (audio_has_proportional_frames(format)) { // true only for PCM and IEC61937
        const size_t bytesPerSample = audio_bytes_per_sample(format); // bytes per sample for this format
        return frameCount * channelCount * bytesPerSample; // one frame = bytes per sample x channel count; min_buf_size = frame size x minimum frame count
    } else {
        return frameCount;
    }
}
           

The first thing called is AudioTrack::getMinFrameCount(). This AudioTrack lives in native space: it corresponds to the Java AudioTrack we used earlier, serves as its agent on the native side, and is responsible for interacting with AudioFlinger.

getMinFrameCount() queries, via AudioSystem, the various capabilities supported by the current audio hardware and combines them into the minimum frame count needed here:

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType); // query the hardware output sample rate; usually the peak supported value
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType); // query the hardware's internal buffer size, in frames
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType); // query the hardware output latency
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/); // compute the minimum frame count required

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
           

This AudioSystem also lives in native space; similarly, there is an AudioSystem class on the Java side, and the relationship between the two mirrors that of AudioTrack. AudioSystem can be viewed as a utility class: it wraps many methods for querying audio device information, and internally most of them rely on AudioFlinger. Once the hardware information has been gathered, the minimum frame count is computed:

// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate); // number of hardware-sized buffers needed to cover the latency; at least 2
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}
           

and the result is returned. Back in the JNI function, the minimum buffer size is then computed:

if (audio_has_proportional_frames(format)) { // true only for PCM and IEC61937
        const size_t bytesPerSample = audio_bytes_per_sample(format); // bytes per sample for this format
        return frameCount * channelCount * bytesPerSample; // one frame = bytes per sample x channel count; min_buf_size = frame size x minimum frame count
    } else {
        return frameCount;
    }
           

For the scenario we are analyzing, the final buffer size is therefore:

frameCount * channelCount * bytesPerSample
           
  1. one frame = bytes per sample × channel count;
  2. min_buf_size = frame size × minimum number of frames required.

With that, we have the min_buf_size we need. For this call we only meant to go as far as the JNI layer, but since the native AudioTrack code involved here is quite simple, we covered it in one go.
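To make the formula concrete, the snippet below queries the minimum buffer size through the public Java API and spells out the arithmetic for one illustrative set of hardware values (the afFrameCount/afSampleRate/afLatency numbers are assumptions for this example; real values vary per device):

// Goes through the JNI path analyzed above (native_get_min_buff_size()).
int minBufSize = AudioTrack.getMinBufferSize(
        44100,                                 // sampleRateInHertz
        AudioFormat.CHANNEL_OUT_STEREO,        // 2 channels
        AudioFormat.ENCODING_PCM_16BIT);       // 2 bytes per sample

// Illustrative arithmetic, assuming afFrameCount = 512, afSampleRate = 44100,
// afLatency = 50 ms. At matching sample rates and speed 1.0,
// sourceFramesNeededWithTimestretch() is roughly afFrameCount (plus a small
// rounding margin), so:
//   minBufCount = 50 / ((1000 * 512) / 44100) = 50 / 11 = 4
//   frameCount  ≈ 4 * 512                     ≈ 2048 frames
//   minBufSize  ≈ 2048 * 2 channels * 2 bytes ≈ 8192 bytes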

2. native_setup()

With the required min_buf_size in hand, we create the Java AudioTrack instance and get ready to play. Constructing the Java AudioTrack object eventually calls down to native_setup(), whose JNI implementation is:

// ----------------------------------------------------------------------------
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack/*0*/) {

    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"
        "nativeAudioTrack=0x%llX",
        jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
        nativeAudioTrack);

    sp<AudioTrack> lpTrack = 0;

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    audio_session_t sessionId = (audio_session_t) nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    AudioTrackJniStorage* lpJniStorage = NULL; // holds important bookkeeping data for the JNI layer

    audio_attributes_t *paa = NULL; // holds settings copied from the Java AudioAttributes

    jclass clazz = env->GetObjectClass(thiz); // get the class object of the Java AudioTrack
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
    if (nativeAudioTrack == 0) { // nativeAudioTrack tells us whether a native AudioTrack still has to be created
        if (jaa == 0) {
            ALOGE("Error creating AudioTrack: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioTrack: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }

        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

        // Invalid channel representations are caught by !audio_is_output_channel() below.
        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
        if (!audio_is_output_channel(nativeChannelMask)) {
            ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
        }

        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

        // check the format.
        // This function was called from Java, so we compare the format against the Java constants
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
        }

        // compute the frame count
        size_t frameCount;
        if (audio_is_linear_pcm(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }

        // create the native AudioTrack object
        lpTrack = new AudioTrack(); // create the native AudioTrack with the no-arg constructor; it only initializes some members

        // read the AudioAttributes values
        paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t)); // allocate paa
        // copy configuration from the Java AudioAttributes into the paa structure
        const jstring jtags =
                (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
        const char* tags = env->GetStringUTFChars(jtags, NULL);
        // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
        strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
        env->ReleaseStringUTFChars(jtags, tags);
        paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);
        paa->content_type =
                (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);
        paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);

        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
                paa->usage, paa->content_type, paa->flags, paa->tags);

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        lpJniStorage = new AudioTrackJniStorage(); // create the AudioTrackJniStorage instance

        // stash data in AudioTrackJniStorage::mCallbackData for the callbacks that follow
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz); // the Java AudioTrack class object
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this); // weak reference to the Java AudioTrack instance
        lpJniStorage->mCallbackData.busy = false;

        // initialize the native AudioTrack object
        status_t status = NO_ERROR;
        switch (memoryMode) {
        case MODE_STREAM:

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SYNC,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        case MODE_STATIC:
            // AudioTrack is using shared memory

            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }

        if (status != NO_ERROR) {
            ALOGE("Error %d initializing AudioTrack", status);
            goto native_init_failure;
        }
    } else {  // end if (nativeAudioTrack == 0)
        lpTrack = (AudioTrack*)nativeAudioTrack;
        // TODO: We need to find out which members of the Java AudioTrack might
        // need to be initialized from the Native AudioTrack
        // these are directly returned from getters:
        //  mSampleRate
        //  mAudioFormat
        //  mStreamType
        //  mChannelConfiguration
        //  mChannelCount
        //  mState (?)
        //  mPlayState (?)
        // these may be used internally (Java AudioTrack.audioParamCheck():
        //  mChannelMask
        //  mChannelIndexMask
        //  mDataLoadMode

        // initialize the callback information:
        // this data will be passed with every AudioTrack callback
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioTrack object can be garbage collected.
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.busy = false;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioTrack in case we create a new session
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {
        const jint elements[1] = { (jint) lpTrack->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }

    {   // scope for the lock
        Mutex::Autolock l(sLock);
        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
    }
    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
    // of the Java object (in mNativeTrackInJavaObj)
    setAudioTrack(env, thiz, lpTrack); // store the native AudioTrack's address in Java AudioTrack::mNativeTrackInJavaObj, binding the two objects

    // save the JNI resources so we can free them later
    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);

    // since we had audio attributes, the stream type was derived from them during the
    // creation of the native AudioTrack: push the same value to the Java object
    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
    if (paa != NULL) {
        // audio attributes were copied in AudioTrack creation
        free(paa);
        paa = NULL;
    }


    return (jint) AUDIO_JAVA_SUCCESS;

    // failures:
native_init_failure:
    if (paa != NULL) {
        free(paa);
    }
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
    delete lpJniStorage;
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    // lpTrack goes out of scope, so reference count drops to zero
    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
           

Importantly, the function first creates the native AudioTrack and AudioTrackJniStorage instances. The no-arg AudioTrack constructor is simple and only initializes some member variables and state:

// no-arg constructor: initializes state and some member variables
AudioTrack::AudioTrack()
    : mStatus(NO_INIT), // internal status of this AudioTrack
      mState(STATE_STOPPED), // current playback state of the track
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
           

AudioTrackJniStorage, on the other hand, is more of a callback/utility holder:

class AudioTrackJniStorage {
    public:
        sp<MemoryHeapBase>         mMemHeap;
        sp<MemoryBase>             mMemBase;
        audiotrack_callback_cookie mCallbackData;
        sp<JNIDeviceCallback>      mDeviceCallback;

    AudioTrackJniStorage() {
        mCallbackData.audioTrack_class = 0; // the Java AudioTrack class object
        mCallbackData.audioTrack_ref = 0; // weak reference to the Java AudioTrack instance
    }

    ~AudioTrackJniStorage() {
        mMemBase.clear();
        mMemHeap.clear();
    }

    bool allocSharedMem(int sizeInBytes) {
        mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
        if (mMemHeap->getHeapID() < 0) {
            return false;
        }
        mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
        return true;
    }
};
           
class JNIDeviceCallback: public AudioSystem::AudioDeviceCallback
{
public:
    JNIDeviceCallback(JNIEnv* env, jobject thiz, jobject weak_thiz, jmethodID postEventFromNative);
    ~JNIDeviceCallback();

    virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                     audio_port_handle_t deviceId);

private:
    void sendEvent(int event);

    jclass      mClass;     // Reference to AudioTrack/AudioRecord class
    jobject     mObject;    // Weak ref to AudioTrack/AudioRecord Java object to call on
    jmethodID   mPostEventFromNative; // postEventFromNative method ID.
};
           
struct audiotrack_callback_cookie {
    jclass      audioTrack_class;
    jobject     audioTrack_ref;
    bool        busy;
    Condition   cond;
};
           

MemoryHeapBase and MemoryBase are two Binder classes: both communicate across processes via Binder, and both are tied to shared-memory operations. It is exactly through shared memory that AudioTrack and AudioFlinger exchange audio data. Here are their type definitions:

class MemoryBase : public BnMemory 
{
public:
    MemoryBase(const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~MemoryBase();
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset, size_t* size) const;

protected:
    size_t getSize() const { return mSize; } // size of the memory region
    ssize_t getOffset() const { return mOffset; }
    const sp<IMemoryHeap>& getHeap() const { return mHeap; } // the MemoryHeapBase that created the shared memory

private:
    size_t          mSize; // size of the shared-memory region
    ssize_t         mOffset; // offset into the region
    sp<IMemoryHeap> mHeap; // the MemoryHeapBase that created the shared memory
};
           
class MemoryHeapBase : public virtual BnMemoryHeap
{
public:
    enum {
        READ_ONLY = IMemoryHeap::READ_ONLY,
        // memory won't be mapped locally, but will be mapped in the remote
        // process.
        DONT_MAP_LOCALLY = 0x00000100,
        NO_CACHING = 0x00000200
    };

    /*
     * maps the memory referenced by fd. but DOESN'T take ownership
     * of the filedescriptor (it makes a copy with dup()
     */
    MemoryHeapBase(int fd, size_t size, uint32_t flags = 0, uint32_t offset = 0);

    /*
     * maps memory from the given device
     */
    MemoryHeapBase(const char* device, size_t size = 0, uint32_t flags = 0);

    /*
     * maps memory from ashmem, with the given name for debugging
     */
    MemoryHeapBase(size_t size, uint32_t flags = 0, char const* name = NULL);

    virtual ~MemoryHeapBase();

    /* implement IMemoryHeap interface */
    virtual int         getHeapID() const;

    /* virtual address of the heap. returns MAP_FAILED in case of error */
    virtual void*       getBase() const;

    virtual size_t      getSize() const;
    virtual uint32_t    getFlags() const;
    virtual uint32_t    getOffset() const;

    const char*         getDevice() const;

    /* this closes this heap -- use carefully */
    void dispose();

    /* this is only needed as a workaround, use only if you know
     * what you are doing */
    status_t setDevice(const char* device) {
        if (mDevice == 0)
            mDevice = device;
        return mDevice ? NO_ERROR : ALREADY_EXISTS;
    }

protected:
            MemoryHeapBase();
    // init() takes ownership of fd
    status_t init(int fd, void *base, int size,
            int flags = 0, const char* device = NULL);

private:
    status_t mapfd(int fd, size_t size, uint32_t offset = 0);

    int         mFD; // file descriptor of the shared memory
    size_t      mSize; // size of the shared memory created
    void*       mBase; // base address of the mapped shared memory
    uint32_t    mFlags;
    const char* mDevice;
    bool        mNeedUnmap;
    uint32_t    mOffset;
};
           

AudioTrackJniStorage provides allocSharedMem() for creating the shared memory:

bool allocSharedMem(int sizeInBytes) {
        mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
        if (mMemHeap->getHeapID() < 0) {
            return false;
        }
        mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
        return true;
    }
           

Its parameter specifies the size of the shared memory to create. From the function and the type definitions we can see that MemoryBase obtains the details of the shared memory through MemoryHeapBase, while MemoryHeapBase actually implements its creation.

Looking at how a MemoryHeapBase is constructed:

MemoryHeapBase::MemoryHeapBase(size_t size, uint32_t flags, char const * name)
    : mFD(-1), mSize(0), mBase(MAP_FAILED), mFlags(flags),
      mDevice(0), mNeedUnmap(false), mOffset(0)
{
    const size_t pagesize = getpagesize();
    size = ((size + pagesize-1) & ~(pagesize-1));
    int fd = ashmem_create_region(name == NULL ? "MemoryHeapBase" : name, size); // provided by libcutils: opens /dev/ashmem and creates a region of the requested size, returning its fd
    ALOGE_IF(fd<0, "error creating ashmem region: %s", strerror(errno));
    if (fd >= 0) {
        if (mapfd(fd, size) == NO_ERROR) { // mmap the region to obtain its address, and record the region's details
            if (flags & READ_ONLY) {
                ashmem_set_prot_region(fd, PROT_READ);
            }
        }
    }
}
           

Constructing a MemoryHeapBase is really the creation and initialization of a block of shared memory. If the file descriptor returned for the region is valid, the region is then mapped into memory:

status_t MemoryHeapBase::mapfd(int fd, size_t size, uint32_t offset)
{
    if (size == 0) {
        // try to figure out the size automatically
        struct stat sb;
        if (fstat(fd, &sb) == 0)
            size = sb.st_size;
        // if it didn't work, let mmap() fail.
    }

    if ((mFlags & DONT_MAP_LOCALLY) == 0) {
        void* base = (uint8_t*)mmap(0, size,
                PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
        if (base == MAP_FAILED) {
            ALOGE("mmap(fd=%d, size=%u) failed (%s)",
                    fd, uint32_t(size), strerror(errno));
            close(fd);
            return -errno;
        }
        //ALOGD("mmap(fd=%d, base=%p, size=%lu)", fd, base, size);
        mBase = base;
        mNeedUnmap = true;
    } else  {
        mBase = 0; // not MAP_FAILED
        mNeedUnmap = false;
    }
    mFD = fd;
    mSize = size;
    mOffset = offset;
    return NO_ERROR;
}
           

mapfd() also records information about this shared-memory region, such as its file descriptor and base address. allocSharedMem() then creates the MemoryBase object:

MemoryBase::MemoryBase(const sp<IMemoryHeap>& heap,
        ssize_t offset, size_t size)
    : mSize(size), mOffset(offset), mHeap(heap) // initialize the members
{
}

sp<IMemoryHeap> MemoryBase::getMemory(ssize_t* offset, size_t* size) const
{
    if (offset) *offset = mOffset; // return the offset into the region
    if (size)   *size = mSize; // return the size of the region
    return mHeap; // return the MemoryHeapBase that created the shared memory
}

MemoryBase::~MemoryBase()
{
}
           

Its construction is trivial: it just keeps the MemoryHeapBase instance created beforehand.

Returning to android_media_AudioTrack_setup(): after AudioTrack and AudioTrackJniStorage have been created and some initialization has been done, AudioTrack::set() is executed. How it is called depends on the memoryMode passed down from above, i.e., on STREAM versus STATIC mode. The call differs slightly between the two:

switch (memoryMode) {
        case MODE_STREAM:

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SYNC,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        case MODE_STATIC:
            // AudioTrack is using shared memory

            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }
           

Compare this with the definition of AudioTrack::set():

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
           

Note that in STREAM mode we do not pass in any shared memory; the shared memory needed will be created by AudioFlinger. In STATIC mode, the shared-memory region is created first and its MemoryBase instance is passed in.
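On the application side, the mode is selected by the last constructor argument; here is a minimal sketch against the public Java API (minBufSize and clipSizeInBytes are placeholder values for this example):

// STREAM mode: no app-side shared memory; data is pushed later via write().
AudioTrack streamTrack = new AudioTrack(
        AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
        minBufSize, AudioTrack.MODE_STREAM);      // -> set(..., shared mem == 0, TRANSFER_SYNC, ...)

// STATIC mode: allocSharedMem() sizes the region after buffSizeInBytes, so the
// buffer must be big enough for the entire clip up front.
AudioTrack staticTrack = new AudioTrack(
        AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
        clipSizeInBytes, AudioTrack.MODE_STATIC); // -> set(..., mMemBase, TRANSFER_SHARED, ...)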

The processing after the AudioTrack::set() call is fairly simple. One point worth noting is that the newly created native AudioTrack instance is bound to the Java AudioTrack here:

setAudioTrack(env, thiz, lpTrack); // store the native AudioTrack's address in Java AudioTrack::mNativeTrackInJavaObj
           
static sp<AudioTrack> setAudioTrack(JNIEnv* env, jobject thiz, const sp<AudioTrack>& at)
{
    Mutex::Autolock l(sLock);
    sp<AudioTrack> old =
            (AudioTrack*)env->GetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    if (at.get()) {
        at->incStrong((void*)setAudioTrack);
    }
    if (old != 0) {
        old->decStrong((void*)setAudioTrack);
    }
    env->SetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, (jlong)at.get());
    return old;
}
           

At this point the Java AudioTrack holds the address of the native AudioTrack instance; everything that follows relies on this one-to-one pairing between the Java and native objects. The rest needs no further elaboration.

From this part we obtained an important call: AudioTrack::set().

3. native_start()

Once the Java AudioTrack instance has been created, we call play() to get ready for playback; this eventually reaches native_start(), whose JNI implementation is:

static void
android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
{
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for start()");
        return;
    }

    lpTrack->start();
}
           
static sp<AudioTrack> getAudioTrack(JNIEnv* env, jobject thiz)
{
    Mutex::Autolock l(sLock);
    AudioTrack* const at =
            (AudioTrack*)env->GetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    return sp<AudioTrack>(at);
}
           

It first retrieves the native AudioTrack instance created earlier, then calls its start() function.

Here we obtain another important call: AudioTrack::start().

4. native_write_byte()/native_write_short()

With playback set up, we write data to the AudioTrack; the corresponding JNI implementation is:

// ----------------------------------------------------------------------------
static jint android_media_AudioTrack_write_native_bytes(JNIEnv *env,  jobject thiz,
        jbyteArray javaBytes, jint byteOffset, jint sizeInBytes,
        jint javaAudioFormat, jboolean isWriteBlocking) {
    //ALOGV("android_media_AudioTrack_write_native_bytes(offset=%d, sizeInBytes=%d) called",
    //    offsetInBytes, sizeInBytes);
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
                "Unable to retrieve AudioTrack pointer for write()");
        return (jint)AUDIO_JAVA_INVALID_OPERATION;
    }

    ScopedBytesRO bytes(env, javaBytes);
    if (bytes.get() == NULL) {
        ALOGE("Error retrieving source of audio data to play, can't play");
        return (jint)AUDIO_JAVA_BAD_VALUE;
    }

    jint written = writeToTrack(lpTrack, javaAudioFormat, bytes.get(), byteOffset,
            sizeInBytes, isWriteBlocking == JNI_TRUE /* blocking */);

    return written;
}
           

This ends up calling writeToTrack():

// ----------------------------------------------------------------------------
template <typename T>
static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
                         jint offsetInSamples, jint sizeInSamples, bool blocking) {
    // give the data to the native AudioTrack object (the data starts at the offset)
    ssize_t written = 0;
    // regular write() or copy the data to the AudioTrack's shared memory?
    size_t sizeInBytes = sizeInSamples * sizeof(T);
    if (track->sharedBuffer() == 0) { // STREAM mode: write the data into the native AudioTrack chunk by chunk
        written = track->write(data + offsetInSamples, sizeInBytes, blocking);
        // for compatibility with earlier behavior of write(), return 0 in this case
        if (written == (ssize_t) WOULD_BLOCK) {
            written = 0;
        }
    } else { // STATIC mode: copy the data straight into the shared memory
        // writing to shared memory, check for capacity
        if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {
            sizeInBytes = track->sharedBuffer()->size();
        }
        memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);
        written = sizeInBytes;
    }
    if (written >= 0) {
        return written / sizeof(T);
    }
    return interpretWriteSizeError(written);
}
           

In STREAM mode we keep writing data into the native AudioTrack, and the important call here is AudioTrack::write(); in STATIC mode we copy the data into the shared-memory region in a single pass.
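On the Java side, STREAM mode is the familiar write loop; a minimal sketch, where the InputStream source is an assumption for this example:

// pcmStream: any PCM source (assumed for this sketch); track: a MODE_STREAM AudioTrack.
void pump(InputStream pcmStream, AudioTrack track, int minBufSize) throws IOException {
    byte[] chunk = new byte[minBufSize];
    int read;
    while ((read = pcmStream.read(chunk)) > 0) {
        // each call goes through native_write_byte() -> writeToTrack() -> AudioTrack::write()
        track.write(chunk, 0, read);
    }
}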

5. native_stop()

When all the data has been written and playback is finished, we call stop() to stop the AudioTrack; the corresponding JNI implementation is:

// ----------------------------------------------------------------------------
static void
android_media_AudioTrack_stop(JNIEnv *env, jobject thiz)
{
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for stop()");
        return;
    }

    lpTrack->stop();
}
           

It directly calls the native AudioTrack::stop() function.

6. native_release()

Because an AudioTrack object ties up a fair number of system resources, after calling AudioTrack::stop() we still need to call release() to free them; the corresponding JNI implementation is:

#define CALLBACK_COND_WAIT_TIMEOUT_MS 1000
static void android_media_AudioTrack_release(JNIEnv *env,  jobject thiz) {
    sp<AudioTrack> lpTrack = setAudioTrack(env, thiz, 0);
    if (lpTrack == NULL) {
        return;
    }
    //ALOGV("deleting lpTrack: %x\n", (int)lpTrack);

    // delete the JNI data
    AudioTrackJniStorage* pJniStorage = (AudioTrackJniStorage *)env->GetLongField(
        thiz, javaAudioTrackFields.jniData);
    // reset the native resources in the Java object so any attempt to access
    // them after a call to release fails.
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    if (pJniStorage) {
        Mutex::Autolock l(sLock);
        audiotrack_callback_cookie *lpCookie = &pJniStorage->mCallbackData;
        //ALOGV("deleting pJniStorage: %x\n", (int)pJniStorage);
        while (lpCookie->busy) {
            if (lpCookie->cond.waitRelative(sLock,
                                            milliseconds(CALLBACK_COND_WAIT_TIMEOUT_MS)) !=
                                                    NO_ERROR) {
                break;
            }
        }
        sAudioTrackCallBackCookies.remove(lpCookie);
        // delete global refs created in native_setup
        env->DeleteGlobalRef(lpCookie->audioTrack_class);
        env->DeleteGlobalRef(lpCookie->audioTrack_ref);
        delete pJniStorage;
    }
}
           

The handling is straightforward: it cleans up the objects created earlier in setup().
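In application code the stop/release pair therefore usually appears together; a minimal teardown sketch (the null-out is defensive style, not something the framework requires):

if (track != null) {
    track.stop();    // -> native_stop(): AudioTrack::stop()
    track.release(); // -> native_release(): frees the JNI storage and the global refs
    track = null;    // drop the reference so the Java object can be collected
}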

This concludes the JNI part of AudioTrack. As the analysis above shows, the work from here on happens inside the native AudioTrack. Summarizing the important calls we collected along the way:

  • AudioTrack::set()
  • AudioTrack::start()
  • AudioTrack::write()
  • AudioTrack::stop()

So, as the next step, we will move into the native AudioTrack and continue the analysis there.