播放器开发(六):音频帧处理并用SDL播放
目录
学习课题:逐步构建开发播放器【QT5 + FFmpeg6 + SDL2】
步骤
AudioOutPut模块
1、初始化【分配缓存、读取信息】
2、开始线程工作【从队列读帧->重采样->SDL回调->写入音频播放数据->SDL进行播放】
主要代码
分配缓存
// For the sample FIFO queue
av_audio_fifo_alloc(playSampleFmt, playChannels, spec.samples * 5);
// For the frame's raw audio byte data
// Compute the frame size for the first time and allocate the buffer
maxOutSamples = (int) av_rescale_rnd(decCtxSamples, playSampleRate, srcSampleRate, AV_ROUND_UP);
audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, maxOutSamples, playSampleFmt, 0);
audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
重采样相关
// Configure the resampler parameters
swr_alloc_set_opts2(&swrContext,
                    &srcChannelLayout, playSampleFmt, playSampleRate,
                    &srcChannelLayout, AVSampleFormat(srcSampleFmt), srcSampleRate,
                    0, nullptr);
// Initialize the resampler
swr_init(swrContext);
// Resampling flow
// Compute how many samples the resampler is expected to output
delay = swr_get_delay(swrContext, sample_rate);
out_samples = (int) av_rescale_rnd(
        nb_samples + delay,
        playSampleRate,
        sample_rate,
        AV_ROUND_DOWN);
// Check whether the predicted output sample count exceeds the current maximum
if (out_samples > maxOutSamples) {
    // Free the buffer and re-allocate it with the new size
    av_freep(&audioBuffer);
    audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, out_samples, playSampleFmt, 0);
    audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
    maxOutSamples = out_samples;
}
playSamples = swr_convert(swrContext, &audioBuffer, out_samples, (const uint8_t **) frame->data, nb_samples);
SDL的音频回调
// The SDL audio callback provides a hook that lets us write audio data
// whenever the audio device asks for it, which is how sound gets played.
// Example callback. The name is up to you; as a class member it must be static.
void AudioOutPut::AudioCallBackFunc(void *userdata, Uint8 *stream, int len) {
    // userdata is whatever was set at init time; often the class's "this" pointer
    // stream is the audio stream; writing audio data into it plays the sound
    // len is the size of SDL's buffer, passed in by SDL; keep filling data until the buffer is full
    ...
}
完整模块
AudioOutPut
//AudioOutPut.h
#include "FFmpegHeader.h"
#include "SDL.h"
#include "queue/AVFrameQueue.h"
#include <QDebug>
#include <QObject>
#include <QtGui>
#include <QtWidgets>
#include <thread>
// Audio output module: pulls decoded frames from a queue, resamples them,
// and plays them through SDL (either via an intermediate sample FIFO fed by
// a worker thread, or by resampling directly inside the SDL callback).
class AudioOutPut {
private:
    std::thread *m_thread;              // worker thread (queue mode) feeding the FIFO
    bool isStopped = true;              // stop flag; the worker thread exits when set
    bool isPlaying = false;             // whether playback is running
    bool isPause = false;               // whether playback is paused
    void run();                         // worker loop: pop frame -> resample -> write to FIFO
    int resampleFrame(AVFrame *frame);  // resample one frame into audioBuffer; returns byte size
    int sdlCallBackMode = 1;            // 1 = FIFO/queue callback path, otherwise direct callback
    QString url;                        // media url
    uint8_t *audioBuffer;               // buffer holding the resampled (decoded) audio bytes
    int audioBufferSize = 0;            // size of audioBuffer in bytes
    int audioBufferIndex = 0;           // read offset into audioBuffer (direct-callback mode)
    SDL_mutex *mtx = nullptr;           // lock guarding the audio FIFO
    SDL_AudioDeviceID audioDevice;      // SDL audio device handle
    AVAudioFifo *fifo = nullptr;        // audio sample FIFO (queue mode)
    AVFrameQueue *frameQueue;           // queue of decoded frames
    SwrContext *swrContext;             // resampler context
    // decoder context
    AVCodecContext *decCtx;             // audio decoder context
    int srcChannels;                    // source channel count
    AVChannelLayout srcChannelLayout;   // source channel layout
    enum AVSampleFormat srcSampleFmt;   // source sample format
    int srcSampleRate;                  // source sample rate
    // player-side (post-SDL-negotiation) parameters
    int maxOutSamples;                  // largest output sample count seen; sizes the buffer
    int playSamples;                    // sample count produced by the last resample
    int playSampleRate;                 // playback sample rate
    enum AVSampleFormat playSampleFmt;  // playback sample format (packed variant of the source)
    int playChannels;                   // playback channel count
public:
    AudioOutPut(AVCodecContext *dec_ctx, AVFrameQueue *frame_queue);
    // Open the SDL audio device and set up resampler/buffers; mode 1 selects the FIFO path.
    int init(int mode = 1);
    // Static SDL callback trampoline; dispatches to one of the member callbacks below.
    static void AudioCallBackFunc(void *userdata, Uint8 *stream, int len);
    // SDL audio callback body, plain (resample-in-callback) version
    void AudioCallBack(Uint8 *stream, int len);
    // SDL audio callback body, FIFO-queue version
    void AudioCallBackFromQueue(Uint8 *stream, int len);
    // Unpause the device and (in queue mode) launch the worker thread.
    int start();
};
//AudioOutPut.cpp
#include "AudioOutPut.h"
// Capture the source audio parameters from the decoder context.
// The decoder context and frame queue are borrowed, not owned.
AudioOutPut::AudioOutPut(AVCodecContext *dec_ctx, AVFrameQueue *frame_queue)
    : decCtx(dec_ctx), frameQueue(frame_queue) {
    srcSampleFmt = decCtx->sample_fmt;
    srcSampleRate = decCtx->sample_rate;
    // AVChannelLayout may own heap data (custom channel maps); a plain struct
    // assignment would alias that memory. Use FFmpeg's deep-copy API instead.
    av_channel_layout_copy(&srcChannelLayout, &decCtx->ch_layout);
    srcChannels = srcChannelLayout.nb_channels;
}
int AudioOutPut::init(int mode) {
sdlCallBackMode = mode;
// SDL init
if (SDL_Init(SDL_INIT_AUDIO) != 0) {
qDebug() << "SDL_INIT_AUDIO error";
return -1;
}
SDL_AudioSpec wanted_spec, spec;
wanted_spec.channels = decCtx->ch_layout.nb_channels;
wanted_spec.freq = decCtx->sample_rate;
SDL_AudioFormat sample_type;
switch (srcSampleFmt) {
case AV_SAMPLE_FMT_FLTP:
case AV_SAMPLE_FMT_FLT:
sample_type = AUDIO_F32SYS;
break;
case AV_SAMPLE_FMT_U8P:
case AV_SAMPLE_FMT_U8:
sample_type = AUDIO_U8;
break;
case AV_SAMPLE_FMT_S64P:
case AV_SAMPLE_FMT_S64:
case AV_SAMPLE_FMT_S32P:
case AV_SAMPLE_FMT_S32:
sample_type = AUDIO_S32SYS;
break;
case AV_SAMPLE_FMT_S16P:
case AV_SAMPLE_FMT_S16:
sample_type = AUDIO_S16SYS;
break;
default:
sample_type = AUDIO_S16SYS;
qDebug() << "不支持的采样格式:AVSampleFormat(" << srcSampleFmt << ")";
}
wanted_spec.format = sample_type;
wanted_spec.silence = 0;
wanted_spec.callback = AudioCallBackFunc;
wanted_spec.userdata = this;
wanted_spec.samples = decCtx->frame_size;
int ret;
// ret = SDL_OpenAudio(&wanted_spec, &spec);
audioDevice = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_ANY_CHANGE);
if (audioDevice == 0) {
qDebug() << "SDL_OpenAudio error";
return -1;
}
playChannels = spec.channels;
playSampleRate = spec.freq;
playSampleFmt = av_get_packed_sample_fmt(srcSampleFmt);
if (mode == 1) {
fifo = av_audio_fifo_alloc(playSampleFmt, playChannels, spec.samples * 5);
}
ret = swr_alloc_set_opts2(&swrContext,
&srcChannelLayout, playSampleFmt, playSampleRate,
&srcChannelLayout, AVSampleFormat(srcSampleFmt), srcSampleRate,
0, nullptr);
if (ret != 0) {
qDebug() << "swr_alloc_set_opts2错误";
return -1;
}
if (!swrContext) {
qDebug() << "创建音频重采样上下文错误 swr_alloc";
return -1;
}
ret = swr_init(swrContext);
if (ret < 0) {
qDebug() << "初始化音频重采样上下文错误 swr_init";
return -1;
}
// 解码器上下文保存的帧样本数
int decCtxSamples = 1024;
if (decCtx->frame_size > 1024) {
decCtxSamples = decCtx->frame_size;
}
// 首次计算帧大小,并且开辟缓冲区
maxOutSamples = (int) av_rescale_rnd(decCtxSamples, playSampleRate, srcSampleRate, AV_ROUND_UP);
audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, maxOutSamples, playSampleFmt, 0);
audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
return 1;
}
void AudioOutPut::AudioCallBackFunc(void *userdata, Uint8 *stream, int len) {
AudioOutPut *player = (AudioOutPut *) userdata;
if (player->sdlCallBackMode == 1) {
player->AudioCallBackFromQueue(stream, len);
} else {
player->AudioCallBack(stream, len);
}
}
// Direct-mode SDL callback: resample frames on demand inside the callback
// and copy the result into SDL's buffer until it is full.
void AudioOutPut::AudioCallBack(Uint8 *stream, int len) {
    int len1;// space still writable in SDL's internal stream
    /* len is the size of SDL's buffer; keep filling until it is full */
    while (len > 0) {
        /* audioBufferIndex/audioBufferSize track our own buffer of resampled
         * data awaiting copy into SDL's buffer. index >= size means the
         * buffer is drained and a new frame must be pulled and resampled. */
        if (audioBufferIndex >= audioBufferSize) {
            AVFrame *frame = frameQueue->pop(10);
            if (frame) {
                audioBufferSize = resampleFrame(frame);
                // Release the frame's payload once resampled (matches run();
                // the old code leaked every frame's buffers here).
                av_frame_unref(frame);
                /* audioBufferSize <= 0 means resampling produced nothing;
                 * default to playing silence */
                if (audioBufferSize <= 0) {
                    /* silence */
                    audioBufferSize = 1024;
                    /* zero-fill -> silence */
                    memset(audioBuffer, 0, audioBufferSize);
                }
            } else {
                /* Queue empty: emit silence rather than replaying the stale
                 * contents of audioBuffer (the old code reset the index and
                 * looped the previous frame). */
                audioBufferSize = 1024;
                if (audioBuffer) {
                    memset(audioBuffer, 0, audioBufferSize);
                }
            }
            audioBufferIndex = 0;
        }
        /* Copy as much as fits; the remainder is delivered on the next pass */
        len1 = audioBufferSize - audioBufferIndex;
        if (len1 > len) {
            len1 = len;
        }
        if (audioBuffer == nullptr) return;
        memcpy(stream, (uint8_t *) audioBuffer + audioBufferIndex, len1);
        len -= len1;
        stream += len1;
        audioBufferIndex += len1;
    }
}
// Queue-mode SDL callback: fill SDL's buffer from the sample FIFO.
// Runs on SDL's audio thread, so FIFO access must be locked.
void AudioOutPut::AudioCallBackFromQueue(Uint8 *stream, int len) {
    // Pre-fill with silence so any shortfall from the FIFO plays as silence
    // instead of whatever stale bytes SDL's buffer held.
    SDL_memset(stream, 0, len);
    // SDL asks for `len` bytes; convert that into a sample count for the FIFO
    // (the old code read `playSamples` — the size of the last resampled
    // frame — which need not match what SDL requested).
    const int bytesPerSample = av_get_bytes_per_sample(playSampleFmt) * playChannels;
    if (bytesPerSample <= 0) return;
    int wanted = len / bytesPerSample;
    // AVAudioFifo is not thread-safe and this is the SDL audio thread,
    // while run() writes from the worker thread — hence the lock.
    SDL_LockMutex(mtx);
    // Never read more than the FIFO currently holds.
    int available = av_audio_fifo_size(fifo);
    int toRead = wanted < available ? wanted : available;
    if (toRead > 0) {
        av_audio_fifo_read(fifo, (void **) &stream, toRead);
    }
    SDL_UnlockMutex(mtx);
}
// Begin playback: mark the state flags, launch the worker thread (queue
// mode), and unpause the SDL device. Returns 0 on success, -1 on failure.
int AudioOutPut::start() {
    // The flags must be set BEFORE the worker thread launches: run() tests
    // isStopped at its loop head, and with the old ordering the new thread
    // could observe the initial `true` and exit immediately (race).
    isStopped = false;
    isPlaying = true;
    if (sdlCallBackMode == 1) {
        m_thread = new std::thread(&AudioOutPut::run, this);
        if (!m_thread->joinable()) {
            qDebug() << "AudioOutPut音频帧处理线程创建失败";
            isStopped = true;
            isPlaying = false;
            return -1;
        }
    }
    // Unpause last, once the producer side is known to be up.
    SDL_PauseAudioDevice(audioDevice, 0);
    // SDL_PauseAudio(0);
    return 0;
}
void AudioOutPut::run() {
AVFrame *frame;
while (!isStopped) {
frame = frameQueue->pop(10);
if (frame) {
audioBufferSize = resampleFrame(frame);
while (true) {
SDL_LockMutex(mtx);
if (av_audio_fifo_space(fifo) >= playSamples) {
av_audio_fifo_write(fifo, (void **) &audioBuffer, playSamples);
SDL_UnlockMutex(mtx);
av_frame_unref(frame);
break;
}
SDL_UnlockMutex(mtx);
//队列可用空间不足则延时等待
SDL_Delay((double) playSamples / playSampleRate);
}
}
}
}
// Resample one decoded frame into audioBuffer (growing it when needed).
// Updates playSamples with the produced sample count.
// Returns the byte size of the resampled data, or -1 on failure.
int AudioOutPut::resampleFrame(AVFrame *frame) {
    int64_t delay; // samples still buffered inside the resampler
    int out_samples;// predicted output sample count after resampling
    int sample_rate;// frame's original sample rate
    int nb_samples; // frame's original sample count
    sample_rate = frame->sample_rate;
    nb_samples = frame->nb_samples;
    // Predict how many samples resampling will output, accounting for the
    // resampler's internal delay.
    delay = swr_get_delay(swrContext, sample_rate);
    out_samples = (int) av_rescale_rnd(
            nb_samples + delay,
            playSampleRate,
            sample_rate,
            AV_ROUND_DOWN);
    // Grow the buffer when the prediction exceeds its current capacity.
    if (out_samples > maxOutSamples) {
        // Free the buffer and re-allocate it with the new size
        av_freep(&audioBuffer);
        audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, out_samples, playSampleFmt, 0);
        audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
        if (!audioBuffer) {
            // OOM: reset capacity bookkeeping and report failure instead of
            // handing swr_convert a null destination.
            audioBufferSize = 0;
            maxOutSamples = 0;
            return -1;
        }
        maxOutSamples = out_samples;
    }
    playSamples = swr_convert(swrContext, &audioBuffer, out_samples, (const uint8_t **) frame->data, nb_samples);
    if (playSamples <= 0) {
        return -1;
    }
    // align=1: report the exact payload size of the produced samples.
    return av_samples_get_buffer_size(nullptr, srcChannels, playSamples, playSampleFmt, 1);
}
PlayerMain
添加音频输出代码
AudioOutPut *audioOutPut;
// Wire the audio output to the audio decoder context and its decoded-frame queue.
audioOutPut = new AudioOutPut(audioDecodeThread->dec_ctx, &audioFrameQueue);
audioOutPut->init(1);
audioOutPut->start();
测试运行结果
如果需要同时执行视频和音频的输出,记得要在解复用模块那把限制队列大小的位置把视频队列的大小限制给去掉。
目前只是实现了音频播放和视频渲染显示画面,但是可以看到音频和视频是不同步的,下一章我们就要让音频和视频同步起来。
播放器开发(六):音频帧处理并用SDL播放结果