My version: OpenSL ES audio playback

extern "C"{
    #include <stdio.h>
    #include <stdlib.h>
#include <unistd.h>
}

#include <string>
#include <memory>
#include <jni.h>
#include <android/log.h>
#include <android/native_window_jni.h>

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

using namespace std;
extern "C" {
    ///#include <libavcodec/avcodec.h>
    #include "libavutil/opt.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/common.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"
    #include "libavutil/time.h"
    #include "libavutil/fifo.h"
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libavformat/avio.h"
    //#include "libavfilter/avfiltergraph.h"
    #include "libavfilter/avfilter.h"
    #include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"
    #include "libswscale/swscale.h"
    #include "libswresample/swresample.h"
#define TAG "zj"

#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)

}

int mymain(JNIEnv* env, jobject surface);
extern "C" JNIEXPORT jstring JNICALL
Java_com_example_nativeffmpeg_MainActivity_stringFromJNI(
        JNIEnv* env,
        jobject obj /* this */) {
    std::string hello = "Hello from C++19";
    //
    mymain(env, obj);
    return env->NewStringUTF(hello.c_str());
    //return env->NewStringUTF(av_version_info());
}

#define INBUF_SIZE 4096

char *spath ="data/data/com.example.nativeffmpeg/single.pcm";
//char *spath ="data/data/com.example.nativeffmpeg/NocturneNo2inEflat_44.1k_s16le.pcm";
//char *spath ="/sdcard/Download/test1.mp4";

extern "C" JNIEXPORT void JNICALL
Java_com_example_nativeffmpeg_MainActivity_playRgb(
        JNIEnv* env,
        jobject  /* this */,
        jobject surface)
//void playVideo(JNIEnv* env, jobject surface)
{
   // mymain(env, surface);
}
static char *buf = NULL;
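// Buffer-queue callback: OpenSL ES fires this each time a queued buffer
// finishes playing, so playback continues for as long as the callback keeps
// refilling and re-enqueueing. The dummy Enqueue() in mymain() primes it.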
void bufferQueueCallback(SLAndroidSimpleBufferQueueItf bufferQueue, void *pContext) {
    static FILE *fp = NULL;


    if (!fp) {
        fp = fopen(spath, "rb");
        LOGD("fopen %s ---------------------------------->\n",spath);
    }
    if (!fp) return;
    if (!feof(fp)) {
        // 44100 samples/s * 2 channels * 2 bytes = one second of PCM per read.
        // Once EOF is hit nothing is re-enqueued, so playback simply stops.
        int len = (int) fread(buf, 1, 44100 * 2 * 2, fp);
        if (len > 0)
            (*bufferQueue)->Enqueue(bufferQueue, buf, len);
        LOGD("fread len %d ------------------------------->\n", len);
    }
}

int mymain(JNIEnv* env, jobject object)
{
    if (!buf) {
        // One second of 44.1 kHz 16-bit stereo PCM, matching the reads above.
        buf = new char[44100 * 2 * 2];
    }

    SLresult re;
    SLObjectItf engineObject;
    SLEngineItf slAudioEngine;

    // 1. Create and init audio engine
    re = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("slCreateEngine() failed");
        return -1;
    }
    re = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("engineObject Realize failed");
        return -1;
    }
    re = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &slAudioEngine);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("engineObject GetInterface SL_IID_ENGINE failed");
        return -1;
    }

    // 2. Set output mix
    SLObjectItf outputMix;
    re = (*slAudioEngine)->CreateOutputMix(slAudioEngine, &outputMix, 0, NULL, NULL);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("CreateOutputMix() failed");
        return -1;
    }
    re = (*outputMix)->Realize(outputMix, SL_BOOLEAN_FALSE);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("outputMix Realize failed");
        return -1;
    }

    // 3. Configuring the input data source
    SLDataLocator_AndroidSimpleBufferQueue inputBuffQueueLocator = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 10};
    SLDataFormat_PCM input_format_pcm = {
            SL_DATAFORMAT_PCM,                              // input format: PCM
            2,                                              // channels: 2 (stereo)
            SL_SAMPLINGRATE_44_1,                           // sample rate: 44100 Hz
            SL_PCMSAMPLEFORMAT_FIXED_16,                    // bits per sample: 16
            SL_PCMSAMPLEFORMAT_FIXED_16,                    // container size: same as above
            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, // channel mask: front left + front right
            SL_BYTEORDER_LITTLEENDIAN                       // byte order: little-endian
    };
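    // This format must match the raw PCM file byte for byte; raw PCM carries
    // no header, so a mismatch plays as noise or at the wrong speed/pitch.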
    SLDataSource dataSource = {&inputBuffQueueLocator, &input_format_pcm};

    SLDataLocator_OutputMix outputMixLocator = {SL_DATALOCATOR_OUTPUTMIX, outputMix};
    SLDataSink dataSink = {&outputMixLocator, 0};

    // 4. Create Audio Player
    SLObjectItf audioPlayer;
    SLAndroidSimpleBufferQueueItf pcmBufferQueue;
    SLPlayItf playInterface;
    SLInterfaceID audioPlayerInterfaceIDs[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
    SLboolean audioPlayerInterfaceRequired[] = {SL_BOOLEAN_TRUE};

    re = (*slAudioEngine)->CreateAudioPlayer(slAudioEngine, &audioPlayer, &dataSource, &dataSink, 1, audioPlayerInterfaceIDs, audioPlayerInterfaceRequired);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("CreateAudioPlayer() failed");
        return -1;
    }
    re = (*audioPlayer)->Realize(audioPlayer, SL_BOOLEAN_FALSE);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("AudioPlayer Realize failed");
        return -1;
    }
    re = (*audioPlayer)->GetInterface(audioPlayer, SL_IID_PLAY, &playInterface);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("AudioPlayer GetInterface SL_IID_PLAY failed");
        return -1;
    }
    // Use the same interface ID that was requested at creation time.
    re = (*audioPlayer)->GetInterface(audioPlayer, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pcmBufferQueue);
    if (re != SL_RESULT_SUCCESS) {
        LOGD("AudioPlayer GetInterface SL_IID_ANDROIDSIMPLEBUFFERQUEUE failed");
        return -1;
    }

    (*pcmBufferQueue)->RegisterCallback(pcmBufferQueue, bufferQueueCallback, NULL);
    (*playInterface)->SetPlayState(playInterface, SL_PLAYSTATE_PLAYING);

    // Kick off the callback loop: enqueue a tiny dummy buffer so the first
    // "buffer finished" callback fires and starts the real file reads.
    (*pcmBufferQueue)->Enqueue(pcmBufferQueue, "", 1);
    return 0;
}
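
To sanity-check the OpenSL ES path before involving FFmpeg, it helps to feed it a known-good input. The sketch below is my addition, not part of the original project: it writes a 440 Hz test tone in exactly the PCM layout declared above (44.1 kHz, 16-bit, stereo, little-endian, interleaved). The helper name writeTestPcm is a hypothetical choice for illustration.

#include <cstdio>
#include <cstdint>
#include <cmath>

// Write `seconds` seconds of a 440 Hz stereo test tone as raw s16le PCM.
static void writeTestPcm(const char *path, int seconds) {
    FILE *out = fopen(path, "wb");
    if (!out) return;
    const int sampleRate = 44100;
    const double freq = 440.0;                 // A4 test tone
    const double twoPi = 6.283185307179586;
    for (int i = 0; i < sampleRate * seconds; i++) {
        double v = sin(twoPi * freq * i / sampleRate);
        int16_t s = (int16_t) (v * 0.2 * 32767.0);  // attenuated to avoid clipping
        int16_t frame[2] = {s, s};                  // same sample on left and right
        fwrite(frame, sizeof(int16_t), 2, out);
    }
    fclose(out);
}

Calling, say, writeTestPcm(spath, 5) before starting the player should produce five one-second reads in the callback log and an audible steady tone.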

The original version: FFmpeg video playback

extern "C"{
    #include <stdio.h>
    #include <stdlib.h>
#include <unistd.h>
}

#include <string>
#include <memory>
#include <jni.h>
#include <android/log.h>
#include <android/native_window_jni.h>
using namespace std;
extern "C" {
    ///#include <libavcodec/avcodec.h>
    #include "libavutil/opt.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/common.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"
    #include "libavutil/time.h"
    #include "libavutil/fifo.h"
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libavformat/avio.h"
    //#include "libavfilter/avfiltergraph.h"
    #include "libavfilter/avfilter.h"
    #include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"
    #include "libswscale/swscale.h"
    #include "libswresample/swresample.h"
#define TAG "zj"

#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)

}


extern "C" JNIEXPORT jstring JNICALL
Java_com_example_nativeffmpeg_MainActivity_stringFromJNI(
        JNIEnv* env,
        jobject /* this */) {
    std::string hello = "Hello from C++";
    //return env->NewStringUTF(hello.c_str());
    return env->NewStringUTF(av_version_info());
}
int mymain(JNIEnv* env, jobject surface);
#define INBUF_SIZE 4096

char *spath ="data/data/com.example.nativeffmpeg/test1.mp4";
//char *spath ="/sdcard/Download/test1.mp4";

extern "C" JNIEXPORT void JNICALL
Java_com_example_nativeffmpeg_MainActivity_playRgb(
        JNIEnv* env,
        jobject  /* this */,
        jobject surface)
//void playVideo(JNIEnv* env, jobject surface)
{
    mymain(env, surface);
}
int videoIndex = -1;
int audioIndex = -1;

AVInputFormat mFormat;
AVDictionary* iformat_opts;
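// Note: av_register_all()/avfilter_register_all() are required on FFmpeg 3.x
// and earlier; both became no-ops and were deprecated in FFmpeg 4.0.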
void Init()
{
    av_register_all();
    avfilter_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_ERROR);
}
AVFormatContext *ic = NULL;
int64_t lastReadPacktTime;
std::shared_ptr<AVPacket> readPacketFromSource()
{
    // Wrap the packet in a shared_ptr whose deleter both unrefs and frees it
    // (av_packet_free() does both and NULLs the pointer, so no extra av_freep()).
    std::shared_ptr<AVPacket> packet(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))),
                                     [](AVPacket *p) { av_packet_free(&p); });
    av_init_packet(packet.get());
    lastReadPacktTime = av_gettime();
    int ret = av_read_frame(ic, packet.get());
    if (ret >= 0)
    {
        return packet;
    }
    return nullptr;
}
bool videoDecode(AVPacket* packet, AVFrame *frame)
{
    int gotFrame = 0;
    // Old-style decode call (pre-FFmpeg 3.1); see the note at the end of this listing.
    auto hr = avcodec_decode_video2(ic->streams[videoIndex]->codec, frame, &gotFrame, packet);
    return hr >= 0 && gotFrame != 0;
}

// Opens a decoder on the stream's embedded codec context (ic->streams[i]->codec,
// deprecated in newer FFmpeg; see the note at the end of this listing).
int initVideoDecodeContext()
{
    if(-1 == videoIndex) return -1;
    auto codecId = ic->streams[videoIndex]->codec->codec_id;
    auto codec = avcodec_find_decoder(codecId);
    if (!codec)
    {
        return -1;
    }

    int ret = avcodec_open2(ic->streams[videoIndex]->codec, codec, NULL);
    return ret;
}
int64_t lastDts;
#define DelayTime 5

int mymain(JNIEnv* env, jobject surface){

    Init();
    ic = avformat_alloc_context();
    int ret;
    if (!ic) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
        ret = AVERROR(ENOMEM);
        LOGE("alloc err %d\n", ret);
        return ret;
    }
    /*
    if (!av_dict_get(iformat_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
    av_dict_set(&iformat_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
    scan_all_pmts_set = 1;
    }
    */
    int err = avformat_open_input(&ic, spath, nullptr, nullptr);
    if (err < 0) {
        LOGE("open err err=%d\n", err);
        return err;
    }
    LOGE("come 2\n");

    ret = avformat_find_stream_info(ic, nullptr);
    if (ret < 0)
    {
        LOGE("Find input file stream info failed\n");
    }
    else
    {
        for (int i = 0; i < ic->nb_streams; i++)
        {
            if (ic->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO)
            {
                videoIndex = i;
            }
            else if (ic->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_AUDIO)
            {
                audioIndex = i;
            }
        }
        //playState = CGPlayState::POpen;
        //av_log(NULL, AV_LOG_FATAL, "Open input file  %s success\n", inputUrl.c_str());
    }
    ret = initVideoDecodeContext();
    LOGE("initVideoDecodeContext ret = %d\n", ret);
    if (ret < 0) return ret;

    int width  = ic->streams[videoIndex]->codec->width;
    int height = ic->streams[videoIndex]->codec->height;
    AVFrame * videoFrame = av_frame_alloc();
    SwsContext *vctx = NULL;
    int outWidth = 1920;
    int outHeight = 1080;
    char *rgb = new char[outWidth * outHeight * 4];
    char *pcm = new char[48000 * 4 * 2];
    // Initialize the display window: fix the window buffer at outWidth x outHeight
    // RGBA, so every decoded frame is scaled to that size by sws_scale below.
    ANativeWindow *nwin = ANativeWindow_fromSurface(env, surface);
    ANativeWindow_setBuffersGeometry(nwin, outWidth, outHeight, WINDOW_FORMAT_RGBA_8888);
    ANativeWindow_Buffer wbuf;

    // Read a fixed number of packets (sized to the author's test clip).
    for (int i = 0; i < 1570; i++) {
        auto packet = readPacketFromSource();
        if (packet) {
            if (packet->stream_index == videoIndex){
                if (videoDecode(packet.get(), videoFrame))
                {
                    AVFrame *frame= videoFrame;
                    vctx = sws_getCachedContext(vctx,
                                                frame->width,
                                                frame->height,
                                                (AVPixelFormat) frame->format,
                                                outWidth,
                                                outHeight,
                                                AV_PIX_FMT_RGBA,
                                                //AV_PIX_FMT_ABGR,
                                                SWS_FAST_BILINEAR,
                                                0, 0, 0
                    );
                    if (!vctx) {
                        LOGE("sws_getCachedContext failed!");
                    } else {
                        uint8_t *data[AV_NUM_DATA_POINTERS] = {0};
                        data[0] = (uint8_t *) rgb;
                        int lines[AV_NUM_DATA_POINTERS] = {0};
                        lines[0] = outWidth * 4;
                        int h = sws_scale(vctx,
                                          (const uint8_t **) frame->data,
                                          frame->linesize, 0,
                                          frame->height,
                                          data, lines);
                        //LOGE("mysws_scale = %d %d", h,i);

                        if (h > 0) {
                            // Draw: copy the RGBA frame into the window buffer row by
                            // row; the window stride (in pixels) may exceed outWidth.
                            ANativeWindow_lock(nwin, &wbuf, 0);
                            uint8_t *dst = (uint8_t *) wbuf.bits;
                            for (int y = 0; y < outHeight; y++) {
                                memcpy(dst + y * wbuf.stride * 4, rgb + y * outWidth * 4, outWidth * 4);
                            }
                            ANativeWindow_unlockAndPost(nwin);
                        }
                    }
                }
            }
        }
    }

    // Release everything acquired above.
    av_frame_free(&videoFrame);
    sws_freeContext(vctx);
    ANativeWindow_release(nwin);
    avformat_close_input(&ic);
    delete[] rgb;
    delete[] pcm;
    return 0;
}
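
A note on the FFmpeg calls used here: ic->streams[i]->codec and avcodec_decode_video2() were deprecated in FFmpeg 3.1 in favor of per-stream codec parameters and the send/receive API, and the listing above targets the older FFmpeg it was built against. On a current FFmpeg the decode step would look roughly like this sketch (my addition, error handling trimmed, reusing the variable names from above):

// Build a dedicated decoder context from the stream's codecpar...
AVCodecContext *dec = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(dec, ic->streams[videoIndex]->codecpar);
avcodec_open2(dec, codec, NULL);

// ...then replace each avcodec_decode_video2() call with send/receive:
if (avcodec_send_packet(dec, packet.get()) == 0) {
    while (avcodec_receive_frame(dec, videoFrame) == 0) {
        // sws_scale + ANativeWindow drawing, exactly as in the loop above
    }
}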

 
