[ffmpeg] Video format conversion
This note walks through video pixel-format conversion in ffmpeg (libswscale). The frames we render on screen are RGBA while the encoder consumes YUV, so this conversion comes up constantly.
Besides ffmpeg, the conversion can also be done with libyuv, or on the GPU with a DirectX shader.
I once read that libyuv was split out of ffmpeg, and that it is supposedly a bit faster; I have not verified or benchmarked either claim, and may compare the two later.
API usage
Commonly used APIs
struct SwsContext *sws_alloc_context(void);
int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);
void sws_freeContext(struct SwsContext *swsContext);
struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
                                  int dstW, int dstH, enum AVPixelFormat dstFormat,
                                  int flags, SwsFilter *srcFilter,
                                  SwsFilter *dstFilter, const double *param);
struct SwsContext *sws_getCachedContext(struct SwsContext *context,
                                        int srcW, int srcH, enum AVPixelFormat srcFormat,
                                        int dstW, int dstH, enum AVPixelFormat dstFormat,
                                        int flags, SwsFilter *srcFilter,
                                        SwsFilter *dstFilter, const double *param);
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
              const int srcStride[], int srcSliceY, int srcSliceH,
              uint8_t *const dst[], const int dstStride[]);
int sws_scale_frame(struct SwsContext *c, AVFrame *dst, const AVFrame *src);
Initialization and teardown
sws_alloc_context: allocate a SwsContext
sws_init_context: initialize the context (direct use is not recommended)
sws_freeContext: free the context
The SwsContext AVClass is defined in libswscale/options.c:
const AVClass ff_sws_context_class = {
    .class_name                = "SWScaler",
    .item_name                 = sws_context_to_name,
    .option                    = swscale_options,
    .parent_log_context_offset = OFFSET(parent),
    .category                  = AV_CLASS_CATEGORY_SWSCALER,
    .version                   = LIBAVUTIL_VERSION_INT,
};
Before sws_init_context can succeed, a number of fields on the context have to be filled in:
context = sws_alloc_context();
/* These fields are private to libswscale; from application code they are
 * set through the AVOptions API instead (see the sketch below). */
context->srcW      = srcW;
context->srcH      = srcH;
context->srcFormat = srcFormat;
context->dstW      = dstW;
context->dstH      = dstH;
context->dstFormat = dstFormat;
context->flags     = flags;
context->param[0]  = param[0];
context->param[1]  = param[1];
sws_init_context(context, srcFilter, dstFilter);
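Since SwsContext is opaque outside libswscale, application code cannot assign those fields directly and goes through AVOptions instead. A minimal sketch of that path, assuming the option names ("srcw", "srch", "src_format", ...) defined in libswscale/options.c match your FFmpeg version:
#include <libswscale/swscale.h>
#include <libavutil/opt.h>

// Manually build an RGBA -> YUV420P scaler through the AVOptions API.
struct SwsContext *ctx = sws_alloc_context();
av_opt_set_int(ctx, "srcw", 1920, 0);
av_opt_set_int(ctx, "srch", 1080, 0);
av_opt_set_pixel_fmt(ctx, "src_format", AV_PIX_FMT_RGBA, 0);
av_opt_set_int(ctx, "dstw", 1280, 0);
av_opt_set_int(ctx, "dsth", 720, 0);
av_opt_set_pixel_fmt(ctx, "dst_format", AV_PIX_FMT_YUV420P, 0);
av_opt_set_int(ctx, "sws_flags", SWS_BICUBIC, 0);
if (sws_init_context(ctx, NULL, NULL) < 0) {
    sws_freeContext(ctx);   // initialization failed, release the context
    ctx = NULL;
}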
sws_getContext exists to simplify this; internally it essentially does 1. sws_alloc_context, 2. set the parameters, 3. sws_init_context.
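For example, the manual alloc/configure/init sequence above collapses into one call (the sizes and flags here are just illustrative):
struct SwsContext *ctx = sws_getContext(
    1920, 1080, AV_PIX_FMT_RGBA,        // source size and format
    1280,  720, AV_PIX_FMT_YUV420P,     // destination size and format
    SWS_BICUBIC,                        // scaling algorithm
    NULL, NULL, NULL);                  // no filters, default params
if (!ctx) {
    // creation/initialization failed
}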
sws_getCachedContext adds a reuse check on top of sws_getContext: if the context passed in was created with exactly the same parameters as the current call, it is returned unchanged; otherwise it is freed and a new one is created. The check in the FFmpeg source:
if (context &&
    (context->srcW != srcW || context->srcH != srcH || context->srcFormat != srcFormat ||
     context->dstW != dstW || context->dstH != dstH || context->dstFormat != dstFormat ||
     context->flags != flags ||
     context->param[0] != param[0] || context->param[1] != param[1]))
Conversion
sws_scale: works on raw plane pointers and strides
sws_scale_frame: works on AVFrame directly
The result is written straight into the destination buffers you pass in, so for sws_scale the dst data pointers must already point to allocated memory.
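The demo below uses sws_scale. For the AVFrame path, a rough sketch (sws_scale_frame appeared around FFmpeg 5.0; here src is assumed to be a valid decoded AVFrame and sws_ctx a context created for the matching formats):
AVFrame *dst = av_frame_alloc();
dst->format = AV_PIX_FMT_YUV420P;
dst->width  = src->width;
dst->height = src->height;
// The dst buffers may be pre-allocated with av_frame_get_buffer(); per the
// sws_frame_start() documentation they can also be left empty, in which case
// the scaler allocates them itself.
int ret = sws_scale_frame(sws_ctx, dst, src);
if (ret < 0) {
    // conversion failed, dst holds no valid picture
}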
Demo call
// Reuse the cached scaler if the parameters are unchanged, otherwise create a new one.
m_vsc = sws_getCachedContext(m_vsc,
    m_inWidth, m_inHeight, (AVPixelFormat)m_inPixFormat,
    m_outWidth, m_outHeight, AV_PIX_FMT_YUV420P,
    SWS_BICUBIC,
    NULL, NULL, NULL);

// Destination frame: its buffers must exist before sws_scale writes into them.
m_yuv = av_frame_alloc();
m_yuv->format = AV_PIX_FMT_YUV420P;
m_yuv->width = m_outWidth;
m_yuv->height = m_outHeight;
m_yuv->pts = 0;
int ret = av_frame_get_buffer(m_yuv, 32);   // 32-byte aligned planes

// Source: a single packed plane (RGBA here), stride = width * 4 bytes.
uint8_t* indata[AV_NUM_DATA_POINTERS] = { 0 };
indata[0] = (uint8_t*)rgb;
int insize[AV_NUM_DATA_POINTERS] = { 0 };
insize[0] = m_inWidth * 4;

// Convert; the return value is the height of the output slice.
int h = sws_scale(m_vsc, indata, insize, 0, m_inHeight,
    m_yuv->data, m_yuv->linesize);

// Release the scaler when it is no longer needed.
if (m_vsc)
{
    sws_freeContext(m_vsc);
    m_vsc = NULL;
}
Miscellaneous
Full interface list (libswscale/swscale.h)
unsigned swscale_version(void);
const char *swscale_configuration(void);
const char *swscale_license(void);
const int *sws_getCoefficients(int colorspace);
int sws_isSupportedInput(enum AVPixelFormat pix_fmt);
int sws_isSupportedOutput(enum AVPixelFormat pix_fmt);
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt);
struct SwsContext *sws_alloc_context(void);
int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);
void sws_freeContext(struct SwsContext *swsContext);
struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]);
int sws_scale_frame(struct SwsContext *c, AVFrame *dst, const AVFrame *src);
int sws_frame_start(struct SwsContext *c, AVFrame *dst, const AVFrame *src);
void sws_frame_end(struct SwsContext *c);
int sws_send_slice(struct SwsContext *c, unsigned int slice_start,
unsigned int slice_height);
int sws_receive_slice(struct SwsContext *c, unsigned int slice_start,
unsigned int slice_height);
unsigned int sws_receive_slice_alignment(const struct SwsContext *c);
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
int srcRange, const int table[4], int dstRange,
int brightness, int contrast, int saturation);
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
int *srcRange, int **table, int *dstRange,
int *brightness, int *contrast, int *saturation);
SwsVector *sws_allocVec(int length);
SwsVector *sws_getGaussianVec(double variance, double quality);
void sws_scaleVec(SwsVector *a, double scalar);
void sws_normalizeVec(SwsVector *a, double height);
void sws_freeVec(SwsVector *a);
SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
float lumaSharpen, float chromaSharpen,
float chromaHShift, float chromaVShift,
int verbose);
void sws_freeFilter(SwsFilter *filter);
struct SwsContext *sws_getCachedContext(struct SwsContext *context,
int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
const AVClass *sws_get_class(void);
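Of the interfaces above, sws_setColorspaceDetails and sws_getCoefficients matter for RGB↔YUV work, since they control the YUV matrix (BT.601 vs BT.709) and full vs limited range. A sketch, using the usual neutral values (brightness/contrast/saturation are 16.16 fixed point):
// Request BT.709 coefficients and limited-range YUV output when
// converting full-range RGBA to YUV420P.
const int *coeffs = sws_getCoefficients(SWS_CS_ITU709);
sws_setColorspaceDetails(m_vsc,
    coeffs, 1,        // input table and range (1 = full range, RGBA)
    coeffs, 0,        // output table and range (0 = limited/MPEG range)
    0,                // brightness
    1 << 16,          // contrast, 16.16 fixed point (1.0)
    1 << 16);         // saturation, 16.16 fixed point (1.0)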
AVClassCategory values (SwsContext uses AV_CLASS_CATEGORY_SWSCALER)
typedef enum {
AV_CLASS_CATEGORY_NA = 0,
AV_CLASS_CATEGORY_INPUT,
AV_CLASS_CATEGORY_OUTPUT,
AV_CLASS_CATEGORY_MUXER,
AV_CLASS_CATEGORY_DEMUXER,
AV_CLASS_CATEGORY_ENCODER,
AV_CLASS_CATEGORY_DECODER,
AV_CLASS_CATEGORY_FILTER,
AV_CLASS_CATEGORY_BITSTREAM_FILTER,
AV_CLASS_CATEGORY_SWSCALER,
AV_CLASS_CATEGORY_SWRESAMPLER,
AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
AV_CLASS_CATEGORY_DEVICE_OUTPUT,
AV_CLASS_CATEGORY_DEVICE_INPUT,
AV_CLASS_CATEGORY_NB ///< not part of ABI/API
}AVClassCategory;