UVC Preview Analysis
It has been a while since I last looked at the UVC code. Picking up from the earlier UVC flow (see the previous post: UVC camera introduction), this article continues with the preview flow, focusing mainly on the C++ side.
Starting from startPreview in UVCCamera.java, we enter the native code:
public synchronized void startPreview() {
if (mCtrlBlock != null) {
nativeStartPreview(mNativePtr);
}
}
The declaration of nativeStartPreview(mNativePtr) is:
private static final native int nativeStartPreview(final long id_camera);
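The JNI glue that backs this declaration is not quoted in this post. It presumably follows the usual pattern of casting the long id_camera back to the native object (a hedged sketch of the binding, not the exact UVCCamera source):

// hypothetical JNI binding: the Java long id_camera is assumed to hold
// a pointer to the native UVCCamera instance
static jint nativeStartPreview(JNIEnv *env, jobject thiz, jlong id_camera) {
    UVCCamera *camera = reinterpret_cast<UVCCamera *>(id_camera);
    if (!camera) return JNI_ERR;
    return camera->startPreview();  // forwards into the C++ layer shown next
}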
On the native side, this call lands in UVCCamera::startPreview:
int UVCCamera::startPreview() {
ENTER();
int result = EXIT_FAILURE;
if (mDeviceHandle) {
return mPreview->startPreview();
}
RETURN(result, int);
}
As you can see, the real work is delegated to mPreview->startPreview(). Stepping into it:
int UVCPreview::startPreview() {
ENTER();
int result = EXIT_FAILURE;
if (!isRunning()) {
mIsRunning = true;
pthread_mutex_lock(&preview_mutex);
{
if (LIKELY(mPreviewWindow)) {
result = pthread_create(&preview_thread, NULL, preview_thread_func, (void *)this);
}
}
pthread_mutex_unlock(&preview_mutex);
if (UNLIKELY(result != EXIT_SUCCESS)) {
LOGW("UVCCamera::window does not exist/already running/could not create thread etc.");
mIsRunning = false;
pthread_mutex_lock(&preview_mutex);
{
pthread_cond_signal(&preview_sync);
}
pthread_mutex_unlock(&preview_mutex);
}
}
RETURN(result, int);
}
A thread is created here:
pthread_create(&preview_thread, NULL, preview_thread_func, (void *)this)
Note the synchronization around the thread creation: preview_mutex is locked while the thread is created. If the thread could not be created, preview_sync is signaled (under the lock) and preview_mutex is unlocked again. This mutex/condition pair is the key to keeping the threads in sync here, and the same pattern shows up in many places later.
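This is the standard pthread mutex plus condition-variable handshake. A minimal, self-contained sketch of the pattern (generic illustration, not UVCCamera code):

#include <pthread.h>
#include <queue>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static std::queue<int> items;

// producer: publish an item under the lock, then wake one waiter
void put(int item) {
    pthread_mutex_lock(&mutex);
    items.push(item);
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&mutex);
}

// consumer: sleep until an item is available
int take() {
    pthread_mutex_lock(&mutex);
    while (items.empty())                  // re-check: guards against spurious wakeups
        pthread_cond_wait(&cond, &mutex);  // atomically unlocks the mutex while sleeping
    int item = items.front();
    items.pop();
    pthread_mutex_unlock(&mutex);
    return item;
}

UVCPreview uses exactly this shape: waitPreviewFrame/waitCaptureFrame are the consumers, and addPreviewFrame/addCaptureFrame are the producers, as we will see below.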
Next, let's analyze the function the new thread runs: preview_thread_func
void *UVCPreview::preview_thread_func(void *vptr_args) {
int result;
ENTER();
UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
if (LIKELY(preview)) {
uvc_stream_ctrl_t ctrl;
result = preview->prepare_preview(&ctrl);
if (LIKELY(!result)) {
preview->do_preview(&ctrl);
}
}
PRE_EXIT();
pthread_exit(NULL);
}
As you can see, this function mainly executes two methods:
preview->prepare_preview(&ctrl);
preview->do_preview(&ctrl);
The rest of the article analyzes how these two methods execute. To avoid getting lost between the second and third layers of calls, it helps to keep the shape of the call graph in mind: preview_thread_func calls two methods, prepare_preview and do_preview; prepare_preview calls three helpers; do_preview calls seven methods, and the first of those (uvc_start_streaming_bandwidth) in turn calls two more.
Notice that both methods receive the parameter &ctrl. This object is defined in libuvc.h; it describes the streaming mode along with its parameter configuration, i.e. it is a control block:
/** Streaming mode, includes all information needed to select stream
* @ingroup streaming
*/
typedef struct uvc_stream_ctrl {
uint16_t bmHint;
uint8_t bFormatIndex;
uint8_t bFrameIndex;
uint32_t dwFrameInterval;
uint16_t wKeyFrameRate;
uint16_t wPFrameRate;
uint16_t wCompQuality;
uint16_t wCompWindowSize;
uint16_t wDelay;
uint32_t dwMaxVideoFrameSize;
uint32_t dwMaxPayloadTransferSize;
/** XXX add UVC 1.1 parameters */
uint32_t dwClockFrequency;
uint8_t bmFramingInfo;
uint8_t bPreferedVersion;
uint8_t bMinVersion;
uint8_t bMaxVersion;
/** XXX add UVC 1.5 parameters */
uint8_t bUsage;
uint8_t bBitDepthLuma;
uint8_t bmSettings;
uint8_t bMaxNumberOfRefFramesPlus1;
uint16_t bmRateControlModes;
uint64_t bmLayoutPerStream;
//
uint8_t bInterfaceNumber;
} uvc_stream_ctrl_t;
At this point the struct carries no values yet; it is simply passed in empty and will be populated later.
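In other words, ctrl is an out-parameter: the caller allocates an empty block and the negotiation below fills it in. A hedged usage sketch (assuming libuvc.h is included; the resolution and fps values here are just examples):

uvc_stream_ctrl_t ctrl;  // empty control block, owned by the caller
// negotiation fills in bFormatIndex, bFrameIndex, dwFrameInterval, ...
uvc_error_t res = uvc_get_stream_ctrl_format_size_fps(
        mDeviceHandle, &ctrl, UVC_FRAME_FORMAT_MJPEG,
        1280, 720, /*min_fps=*/1, /*max_fps=*/30);
if (res == UVC_SUCCESS) {
    // ctrl can now be handed to uvc_start_streaming_bandwidth()
}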
Now let's analyze the two methods in turn:
preview->prepare_preview(&ctrl);
int UVCPreview::prepare_preview(uvc_stream_ctrl_t *ctrl) {
uvc_error_t result;
ENTER();
result = uvc_get_stream_ctrl_format_size_fps(mDeviceHandle, ctrl,
!requestMode ? UVC_FRAME_FORMAT_YUYV : UVC_FRAME_FORMAT_MJPEG,
requestWidth, requestHeight, requestMinFps, requestMaxFps
);
if (LIKELY(!result)) {
#if LOCAL_DEBUG
uvc_print_stream_ctrl(ctrl, stderr);
#endif
uvc_frame_desc_t *frame_desc;
result = uvc_get_frame_desc(mDeviceHandle, ctrl, &frame_desc);
if (LIKELY(!result)) {
frameWidth = frame_desc->wWidth;
frameHeight = frame_desc->wHeight;
LOGI("frameSize=(%d,%d)@%s", frameWidth, frameHeight, (!requestMode ? "YUYV" : "MJPEG"));
pthread_mutex_lock(&preview_mutex);
if (LIKELY(mPreviewWindow)) {
ANativeWindow_setBuffersGeometry(mPreviewWindow,
frameWidth, frameHeight, previewFormat);
}
pthread_mutex_unlock(&preview_mutex);
} else {
frameWidth = requestWidth;
frameHeight = requestHeight;
}
frameMode = requestMode;
frameBytes = frameWidth * frameHeight * (!requestMode ? 2 : 4);
previewBytes = frameWidth * frameHeight * PREVIEW_PIXEL_BYTES;
} else {
LOGE("could not negotiate with camera:err=%d", result);
}
RETURN(result, int);
}
This function mainly negotiates and records the stream parameters. As you can see, prepare_preview calls three methods:
result = uvc_get_stream_ctrl_format_size_fps(mDeviceHandle, ctrl, !requestMode ? UVC_FRAME_FORMAT_YUYV : UVC_FRAME_FORMAT_MJPEG, requestWidth, requestHeight, requestMinFps, requestMaxFps );
result = uvc_get_frame_desc(mDeviceHandle, ctrl, &frame_desc);
ANativeWindow_setBuffersGeometry(mPreviewWindow, frameWidth, frameHeight, previewFormat);
Below is a brief look at what these three methods do. The first one, uvc_get_stream_ctrl_format_size_fps, is defined in stream.c:
/** Get a negotiated streaming control block for some common parameters.
* @ingroup streaming
*
* @param[in] devh Device handle
* @param[in,out] ctrl Control block
* @param[in] cf Type of streaming format
* @param[in] width Desired frame width
* @param[in] height Desired frame height
* @param[in] min_fps Frame rate, minimum frames per second, this value is included
* @param[in] max_fps Frame rate, maximum frames per second, this value is included
*/
uvc_error_t uvc_get_stream_ctrl_format_size_fps(uvc_device_handle_t *devh,
uvc_stream_ctrl_t *ctrl, enum uvc_frame_format cf, int width,
int height, int min_fps, int max_fps) {
ENTER();
uvc_streaming_interface_t *stream_if;
uvc_error_t result;
memset(ctrl, 0, sizeof(*ctrl)); // XXX add
/* find a matching frame descriptor and interval */
uvc_format_desc_t *format;
DL_FOREACH(devh->info->stream_ifs, stream_if)
{
DL_FOREACH(stream_if->format_descs, format)
{
if (!_uvc_frame_format_matches_guid(cf, format->guidFormat))
continue;
result = _uvc_get_stream_ctrl_format(devh, stream_if, ctrl, format, width, height, min_fps, max_fps);
if (!result) { // UVC_SUCCESS
goto found;
}
}
}
RETURN(UVC_ERROR_INVALID_MODE, uvc_error_t);
found:
RETURN(uvc_probe_stream_ctrl(devh, ctrl), uvc_error_t);
}
Its main job is to push the parameters the Java layer requested further down the stack. Note this line:

result = _uvc_get_stream_ctrl_format(devh, stream_if, ctrl, format, width, height, min_fps, max_fps);

Inside _uvc_get_stream_ctrl_format the fields of uvc_stream_ctrl_t *ctrl are assigned; here is the spot where that happens:
if ((it >= (uint32_t) min_fps) && (it <= (uint32_t) max_fps)) {
ctrl->bmHint = (1 << 0); /* don't negotiate interval */
ctrl->bFormatIndex = format->bFormatIndex;
ctrl->bFrameIndex = frame->bFrameIndex;
ctrl->dwFrameInterval = *interval;
goto found;
}
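The `it` being range-checked against min_fps/max_fps here is a frame rate in frames per second, derived from the frame interval. UVC expresses frame intervals in 100 ns units, so interval and fps convert as fps = 10,000,000 / interval. A small sketch of the conversion (my own illustration, not library code):

#include <cstdint>
#include <cstdio>

// UVC frame intervals are in 100 ns units: 10,000,000 units = 1 second
static uint32_t interval_to_fps(uint32_t interval_100ns) {
    return 10000000 / interval_100ns;
}

int main() {
    printf("%u fps\n", interval_to_fps(333333)); // 333333 * 100 ns ~= 1/30 s -> 30 fps
    printf("%u fps\n", interval_to_fps(666666)); // ~= 1/15 s -> 15 fps
    return 0;
}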
The second method: uvc_get_frame_desc
uvc_error_t uvc_get_frame_desc(uvc_device_handle_t *devh,
uvc_stream_ctrl_t *ctrl, uvc_frame_desc_t **desc) {
*desc = uvc_find_frame_desc(devh, ctrl->bFormatIndex, ctrl->bFrameIndex);
return *desc ? UVC_SUCCESS : UVC_ERROR_INVALID_PARAM;
}
It delegates to uvc_find_frame_desc; stepping into that:
/** @internal
* @brief Find the descriptor for a specific frame configuration
* @param devh UVC device
* @param format_id Index of format class descriptor
* @param frame_id Index of frame descriptor
*/
uvc_frame_desc_t *uvc_find_frame_desc(uvc_device_handle_t *devh,
uint16_t format_id, uint16_t frame_id) {
uvc_streaming_interface_t *stream_if;
uvc_frame_desc_t *frame;
DL_FOREACH(devh->info->stream_ifs, stream_if)
{
frame = _uvc_find_frame_desc_stream_if(stream_if, format_id, frame_id);
if (frame)
return frame;
}
return NULL;
}
Its job is to find the descriptor for a specific frame configuration. The frame variable used here is of type uvc_frame_desc_t, which is defined in libuvc.h:
/** Frame descriptor
*
* A "frame" is a configuration of a streaming format
* for a particular image size at one of possibly several
* available frame rates.
*/
typedef struct uvc_frame_desc {
struct uvc_format_desc *parent;
struct uvc_frame_desc *prev, *next;
/** Type of frame, such as JPEG frame or uncompressed frme */
enum uvc_vs_desc_subtype bDescriptorSubtype;
/** Index of the frame within the list of specs available for this format */
uint8_t bFrameIndex;
uint8_t bmCapabilities;
/** Image width */
uint16_t wWidth;
/** Image height */
uint16_t wHeight;
/** Bitrate of corresponding stream at minimal frame rate */
uint32_t dwMinBitRate;
/** Bitrate of corresponding stream at maximal frame rate */
uint32_t dwMaxBitRate;
/** Maximum number of bytes for a video frame */
uint32_t dwMaxVideoFrameBufferSize;
/** Default frame interval (in 100ns units) */
uint32_t dwDefaultFrameInterval;
/** Minimum frame interval for continuous mode (100ns units) */
uint32_t dwMinFrameInterval;
/** Maximum frame interval for continuous mode (100ns units) */
uint32_t dwMaxFrameInterval;
/** Granularity of frame interval range for continuous mode (100ns) */
uint32_t dwFrameIntervalStep;
/** Frame intervals */
uint8_t bFrameIntervalType;
/** number of bytes per line */
uint32_t dwBytesPerLine;
/** Available frame rates, zero-terminated (in 100ns units) */
uint32_t *intervals;
} uvc_frame_desc_t;
As you can see, this is the concrete description of one frame configuration, including width, height, frame intervals, index, and so on.
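One field worth dwelling on is intervals: a zero-terminated array of the supported frame intervals, in 100 ns units. A hedged sketch of how the frame rates offered by a descriptor could be enumerated (assuming libuvc.h is included; illustration only, not library code):

#include <cstdint>
#include <cstdio>

// print the frame rates a frame descriptor offers; `intervals` is
// zero-terminated and in 100 ns units, per the definition above
static void print_frame_rates(const uvc_frame_desc_t *desc) {
    if (desc->intervals) {
        // discrete rates
        for (const uint32_t *it = desc->intervals; *it; ++it)
            printf("%ux%u @ %u fps\n", (unsigned)desc->wWidth,
                   (unsigned)desc->wHeight, 10000000 / *it);
    } else {
        // continuous range between dwMinFrameInterval and dwMaxFrameInterval
        printf("%ux%u @ %u..%u fps (continuous)\n", (unsigned)desc->wWidth,
               (unsigned)desc->wHeight,
               10000000 / desc->dwMaxFrameInterval,
               10000000 / desc->dwMinFrameInterval);
    }
}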
The third method:
ANativeWindow_setBuffersGeometry(mPreviewWindow,
frameWidth, frameHeight, previewFormat);
ANativeWindow_setBuffersGeometry sets the size and pixel format of the window's buffers so they match the negotiated frame. mPreviewWindow here is the preview surface passed down from the upper layer; the path mUVCCameraView travels from the Java layer down to mPreviewWindow is:

AbstractUVCCameraHandler ---> handleStartPreview --->

if (surface instanceof SurfaceHolder) {
    mUVCCamera.setPreviewDisplay((SurfaceHolder)surface);
}
if (surface instanceof Surface) {
    mUVCCamera.setPreviewDisplay((Surface)surface);
} else {
    mUVCCamera.setPreviewTexture((SurfaceTexture)surface);
}

---> UVCCamera class ---> setPreviewDisplay --->

private static final native int nativeSetPreviewDisplay(final long id_camera, final Surface surface);

---> UVCCamera.cpp ---> setPreviewDisplay ---> UVCPreview.cpp ---> setPreviewDisplay
---> mPreviewWindow = preview_window. The assignment can be seen in UVCPreview::setPreviewDisplay:
int UVCPreview::setPreviewDisplay(ANativeWindow *preview_window) {
ENTER();
pthread_mutex_lock(&preview_mutex);
{
if (mPreviewWindow != preview_window) {
if (mPreviewWindow)
ANativeWindow_release(mPreviewWindow);
mPreviewWindow = preview_window;
if (LIKELY(mPreviewWindow)) {
ANativeWindow_setBuffersGeometry(mPreviewWindow,
frameWidth, frameHeight, previewFormat);
}
}
}
pthread_mutex_unlock(&preview_mutex);
RETURN(0, int);
}
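The ANativeWindow itself is obtained from the Java Surface with the NDK call ANativeWindow_fromSurface. The JNI wrapper is not quoted in this post; it presumably looks something like this hedged sketch:

#include <android/native_window_jni.h>  // ANativeWindow_fromSurface

// hypothetical JNI glue for nativeSetPreviewDisplay: converts the Java
// Surface into an ANativeWindow* and hands it down to UVCPreview
static jint nativeSetPreviewDisplay(JNIEnv *env, jobject thiz,
                                    jlong id_camera, jobject jSurface) {
    UVCCamera *camera = reinterpret_cast<UVCCamera *>(id_camera);
    // ANativeWindow_fromSurface acquires a reference, released later
    // via ANativeWindow_release (as in the method above)
    ANativeWindow *window = jSurface ? ANativeWindow_fromSurface(env, jSurface) : NULL;
    return camera ? camera->setPreviewDisplay(window) : JNI_ERR;
}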
That completes the first method, preview->prepare_preview(&ctrl). Everything that follows is the analysis of the second method, preview->do_preview(&ctrl):
-------------------------------------------------------------------------------------------
Now let's look at UVCPreview::do_preview:
void UVCPreview::do_preview(uvc_stream_ctrl_t *ctrl) {
ENTER();
uvc_frame_t *frame = NULL;
uvc_frame_t *frame_mjpeg = NULL;
uvc_error_t result = uvc_start_streaming_bandwidth(
mDeviceHandle, ctrl, uvc_preview_frame_callback, (void *)this, requestBandwidth, 0);
mHasCaptureThread = false;
if (LIKELY(!result)) {
clearPreviewFrame();
if (pthread_create(&capture_thread, NULL, capture_thread_func, (void *)this) == 0) {
mHasCaptureThread = true;
}
#if LOCAL_DEBUG
LOGI("Streaming...");
#endif
if (frameMode) {
// MJPEG mode
for ( ; LIKELY(isRunning()) ; ) {
frame_mjpeg = waitPreviewFrame();
if (LIKELY(frame_mjpeg)) {
frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);
result = uvc_mjpeg2yuyv(frame_mjpeg, frame); // MJPEG => yuyv
recycle_frame(frame_mjpeg);
if (LIKELY(!result)) {
frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
addCaptureFrame(frame);
} else {
recycle_frame(frame);
}
}
}
} else {
// yuvyv mode
for ( ; LIKELY(isRunning()) ; ) {
frame = waitPreviewFrame();
if (LIKELY(frame)) {
frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
addCaptureFrame(frame);
}
}
}
pthread_cond_signal(&capture_sync);
#if LOCAL_DEBUG
LOGI("preview_thread_func:wait for all callbacks complete");
#endif
uvc_stop_streaming(mDeviceHandle);
#if LOCAL_DEBUG
LOGI("Streaming finished");
#endif
} else {
uvc_perror(result, "failed start_streaming");
}
EXIT();
}
As you can see, it calls:
uvc_error_t result = uvc_start_streaming_bandwidth(
mDeviceHandle, ctrl, uvc_preview_frame_callback, (void *)this, requestBandwidth, 0);
This starts streaming from the camera and installs the frame callback. A capture thread is then created:
pthread_create(&capture_thread, NULL, capture_thread_func, (void *)this)
Next it waits for frame data:
frame_mjpeg = waitPreviewFrame();
then obtains a destination frame:
frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);
then draws it onto the window and queues it for capture:
frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
addCaptureFrame(frame);
Notice the if-else structure here: whether frameMode is MJPEG mode or YUYV mode, the basic flow is the same. Both branches essentially run these lines:

frame = waitPreviewFrame();
if (LIKELY(frame)) {
    frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
    addCaptureFrame(frame);
}

The only difference is that MJPEG mode performs one extra conversion step.
The sections below analyze the methods highlighted above, one by one. First, uvc_start_streaming_bandwidth, which lives in libuvc's stream.c:
/** Begin streaming video from the camera into the callback function.
* @ingroup streaming
*
* @param devh UVC device
* @param ctrl Control block, processed using {uvc_probe_stream_ctrl} or
* {uvc_get_stream_ctrl_format_size}
* @param cb User callback function. See {uvc_frame_callback_t} for restrictions.
* @param bandwidth_factor [0.0f, 1.0f]
* @param flags Stream setup flags, currently undefined. Set this to zero. The lower bit
* is reserved for backward compatibility.
*/
uvc_error_t uvc_start_streaming_bandwidth(uvc_device_handle_t *devh,
uvc_stream_ctrl_t *ctrl, uvc_frame_callback_t *cb, void *user_ptr,
float bandwidth_factor,
uint8_t flags) {
uvc_error_t ret;
uvc_stream_handle_t *strmh;
ret = uvc_stream_open_ctrl(devh, &strmh, ctrl);
if (UNLIKELY(ret != UVC_SUCCESS))
return ret;
ret = uvc_stream_start_bandwidth(strmh, cb, user_ptr, bandwidth_factor, flags);
if (UNLIKELY(ret != UVC_SUCCESS)) {
uvc_stream_close(strmh);
return ret;
}
return UVC_SUCCESS;
}
This function feeds the camera's stream data into the callback, i.e. into uvc_frame_callback_t *cb. As you can see, it mainly calls two methods:
ret = uvc_stream_open_ctrl(devh, &strmh, ctrl);
ret = uvc_stream_start_bandwidth(strmh, cb, user_ptr, bandwidth_factor, flags);
The first method, uvc_stream_open_ctrl, opens a new video stream. Here it is:
/** Open a new video stream.
* @ingroup streaming
*
* @param devh UVC device
* @param ctrl Control block, processed using {uvc_probe_stream_ctrl} or
* {uvc_get_stream_ctrl_format_size}
*/
uvc_error_t uvc_stream_open_ctrl(uvc_device_handle_t *devh,
uvc_stream_handle_t **strmhp, uvc_stream_ctrl_t *ctrl) {
/* Chosen frame and format descriptors */
uvc_stream_handle_t *strmh = NULL;
uvc_streaming_interface_t *stream_if;
uvc_error_t ret;
UVC_ENTER();
if (UNLIKELY(_uvc_get_stream_by_interface(devh, ctrl->bInterfaceNumber) != NULL)) {
ret = UVC_ERROR_BUSY; /* Stream is already opened */
goto fail;
}
stream_if = _uvc_get_stream_if(devh, ctrl->bInterfaceNumber);
if (UNLIKELY(!stream_if)) {
ret = UVC_ERROR_INVALID_PARAM;
goto fail;
}
strmh = calloc(1, sizeof(*strmh));
if (UNLIKELY(!strmh)) {
ret = UVC_ERROR_NO_MEM;
goto fail;
}
strmh->devh = devh;
strmh->stream_if = stream_if;
strmh->frame.library_owns_data = 1;
ret = uvc_claim_if(strmh->devh, strmh->stream_if->bInterfaceNumber);
if (UNLIKELY(ret != UVC_SUCCESS))
goto fail;
ret = uvc_stream_ctrl(strmh, ctrl);
LOGE("----------stream.c---uvc_stream_open_ctrl--ret=%d",ret);
if (UNLIKELY(ret != UVC_SUCCESS))
goto fail;
// Set up the streaming status and data space
strmh->running = 0;
/** @todo take only what we need */
strmh->outbuf = malloc(LIBUVC_XFER_BUF_SIZE);
strmh->holdbuf = malloc(LIBUVC_XFER_BUF_SIZE);
strmh->size_buf = LIBUVC_XFER_BUF_SIZE; // xxx for boundary check
pthread_mutex_init(&strmh->cb_mutex, NULL);
pthread_cond_init(&strmh->cb_cond, NULL);
DL_APPEND(devh->streams, strmh);
*strmhp = strmh;
UVC_EXIT(0);
return UVC_SUCCESS;
fail:
if (strmh)
free(strmh);
UVC_EXIT(ret);
return ret;
}
Here devh is the device handle; strmh is the UVC stream handle, declared but not yet populated; and ctrl is the stream control block, the uvc_stream_ctrl_t defined in libuvc.h at the top of this article, whose fields were filled in earlier inside _uvc_get_stream_ctrl_format. The main job of uvc_stream_open_ctrl is to build a stream handle from the devh and ctrl configuration and assign it to the output parameter **strmhp, so after this call strmh is populated as well.
The second method, uvc_stream_start_bandwidth, is what pushes the video stream data into the callback. The function is quite long, so only its key calls are copied here:
frame_desc = uvc_find_frame_desc_stream(strmh, ctrl->bFormatIndex, ctrl->bFrameIndex);
strmh->frame_format = uvc_frame_format_for_guid(format_desc->guidFormat);
interface_id = strmh->stream_if->bInterfaceNumber;
isochronous = interface->num_altsetting > 1;
pthread_create(&strmh->cb_thread, NULL, _uvc_user_caller, (void*) strmh);
After the calls above run, isochronous is computed, and its value decides which branch executes, the if or the else. There is too much code to copy in full; broadly, the two branches populate strmh differently depending on the transfer mode.
if (isochronous) {//
MARK("isochronous transfer mode:num_altsetting=%d", interface->num_altsetting);
... // many lines omitted here
/* Set up the transfers */
MARK("Set up the transfers");
for (transfer_id = 0; transfer_id < LIBUVC_NUM_TRANSFER_BUFS; ++transfer_id) {
transfer = libusb_alloc_transfer(packets_per_transfer);
strmh->transfers[transfer_id] = transfer;
strmh->transfer_bufs[transfer_id] = malloc(total_transfer_size);
libusb_fill_iso_transfer(transfer, strmh->devh->usb_devh,
format_desc->parent->bEndpointAddress,
strmh->transfer_bufs[transfer_id], total_transfer_size,
packets_per_transfer, _uvc_stream_callback,
(void*) strmh, 5000);
libusb_set_iso_packet_lengths(transfer, endpoint_bytes_per_packet);
}
} else {
MARK("bulk transfer mode");
/** prepare for bulk transfer */
for (transfer_id = 0; transfer_id < LIBUVC_NUM_TRANSFER_BUFS; ++transfer_id) {
transfer = libusb_alloc_transfer(0);
strmh->transfers[transfer_id] = transfer;
strmh->transfer_bufs[transfer_id] = malloc(strmh->cur_ctrl.dwMaxPayloadTransferSize);
libusb_fill_bulk_transfer(transfer, strmh->devh->usb_devh,
format_desc->parent->bEndpointAddress,
strmh->transfer_bufs[transfer_id],
strmh->cur_ctrl.dwMaxPayloadTransferSize, _uvc_stream_callback,
(void *)strmh, 5000);
}
}
Put simply: the if branch takes the isochronous transfer mode (MARK("isochronous transfer mode:num_altsetting=%d", interface->num_altsetting)), while the else branch takes the bulk transfer mode (MARK("bulk transfer mode")).
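For the isochronous branch, the omitted code sizes the USB transfers. Roughly, and hedged (this follows upstream libuvc; the fork may cap the packet count or differ in details): the endpoint's wMaxPacketSize gives endpoint_bytes_per_packet, and each transfer gets enough ISO packets to carry one maximal video frame:

#include <cstddef>

// hedged sketch of the omitted transfer-sizing logic (based on upstream libuvc)
static size_t compute_total_transfer_size(size_t max_video_frame_size,
                                          size_t endpoint_bytes_per_packet) {
    // enough ISO packets per transfer to cover one video frame (ceiling division)
    size_t packets_per_transfer =
        (max_video_frame_size + endpoint_bytes_per_packet - 1)
        / endpoint_bytes_per_packet;
    return packets_per_transfer * endpoint_bytes_per_packet;
}
// e.g. compute_total_transfer_size(ctrl->dwMaxVideoFrameSize,
//                                  3072 /* example wMaxPacketSize */)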
Then the thread is created:

pthread_create(&strmh->cb_thread, NULL, _uvc_user_caller, (void*) strmh);

The function it runs is _uvc_user_caller. Stepping into it:
/** @internal
* @brief User callback runner thread
* @note There should be at most one of these per currently streaming device
* @param arg Device handle
*/
static void *_uvc_user_caller(void *arg) {
uvc_stream_handle_t *strmh = (uvc_stream_handle_t *) arg;
uint32_t last_seq = 0;
for (; 1 ;) {
pthread_mutex_lock(&strmh->cb_mutex);
{
for (; strmh->running && (last_seq == strmh->hold_seq) ;) {
pthread_cond_wait(&strmh->cb_cond, &strmh->cb_mutex);
}
if (UNLIKELY(!strmh->running)) {
pthread_mutex_unlock(&strmh->cb_mutex);
break;
}
last_seq = strmh->hold_seq;
if (LIKELY(!strmh->hold_bfh_err)) // XXX
_uvc_populate_frame(strmh);
}
pthread_mutex_unlock(&strmh->cb_mutex);
if (LIKELY(!strmh->hold_bfh_err)) // XXX
strmh->user_cb(&strmh->frame, strmh->user_ptr); // call user callback function
}
return NULL; // return value ignored
}
As you can see, this function invokes the user callback at the very end. It first waits for data:
pthread_cond_wait(&strmh->cb_cond, &strmh->cb_mutex);
then fills in the frame:
_uvc_populate_frame(strmh);
and then runs the callback:
strmh->user_cb(&strmh->frame, strmh->user_ptr); // call user callback function
The important method here is _uvc_populate_frame. Its content:
/** @internal
* @brief Populate the fields of a frame to be handed to user code
* must be called with stream cb lock held!
*/
void _uvc_populate_frame(uvc_stream_handle_t *strmh) {
size_t alloc_size = strmh->cur_ctrl.dwMaxVideoFrameSize;
uvc_frame_t *frame = &strmh->frame;
uvc_frame_desc_t *frame_desc;
/** @todo this stuff that hits the main config cache should really happen
* in start() so that only one thread hits these data. all of this stuff
* is going to be reopen_on_change anyway
*/
frame_desc = uvc_find_frame_desc(strmh->devh, strmh->cur_ctrl.bFormatIndex,
strmh->cur_ctrl.bFrameIndex);
frame->frame_format = strmh->frame_format;
frame->width = frame_desc->wWidth;
frame->height = frame_desc->wHeight;
// XXX set actual_bytes to zero when erro bits is on
frame->actual_bytes = LIKELY(!strmh->hold_bfh_err) ? strmh->hold_bytes : 0;
switch (frame->frame_format) {
case UVC_FRAME_FORMAT_YUYV:
frame->step = frame->width * 2;
break;
case UVC_FRAME_FORMAT_MJPEG:
frame->step = 0;
break;
default:
frame->step = 0;
break;
}
/* copy the image data from the hold buffer to the frame (unnecessary extra buf?) */
if (UNLIKELY(frame->data_bytes < strmh->hold_bytes)) {
frame->data = realloc(frame->data, strmh->hold_bytes); // TODO add error handling when failed realloc
frame->data_bytes = strmh->hold_bytes;
}
memcpy(frame->data, strmh->holdbuf, strmh->hold_bytes/*frame->data_bytes*/); // XXX
/** @todo set the frame time */
}
As you can see, this method takes the current frame out of the stream handle:
uvc_frame_t *frame = &strmh->frame;
assigns the frame's various parameters, and then copies the image data from strmh->holdbuf into frame->data:
memcpy(frame->data, strmh->holdbuf, strmh->hold_bytes/*frame->data_bytes*/);
After that, the callback strmh->user_cb(&strmh->frame, strmh->user_ptr); hands this frame back to the user. This is an important stretch of code: during actual preview it is executed in a loop, once per frame. That is:
stream.c---->uvc_stream_start_bandwidth---->
pthread_create(&strmh->cb_thread, NULL, _uvc_user_caller, (void*) strmh);
After the thread is started here, the current stream object is passed into _uvc_user_caller, and then the flow continues:
stream.c--->_uvc_user_caller--->_uvc_populate_frame--->
strmh->user_cb(&strmh->frame, strmh->user_ptr);
---->UVCPreview::uvc_preview_frame_callback(uvc_frame_t *frame, void *vptr_args)
---->preview->get_frame ----->uvc_duplicate_frame---->
preview->addPreviewFrame(copy)
That is the whole chain.
With that, uvc_start_streaming_bandwidth is covered for now. Next comes the callback function itself, uvc_preview_frame_callback:
void UVCPreview::uvc_preview_frame_callback(uvc_frame_t *frame, void *vptr_args) {
UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
if UNLIKELY(!preview->isRunning() || !frame || !frame->frame_format || !frame->data || !frame->data_bytes) return;
if (UNLIKELY(
((frame->frame_format != UVC_FRAME_FORMAT_MJPEG) && (frame->actual_bytes < preview->frameBytes))
|| (frame->width != preview->frameWidth) || (frame->height != preview->frameHeight) )) {
#if LOCAL_DEBUG
LOGD("broken frame!:format=%d,actual_bytes=%d/%d(%d,%d/%d,%d)",
frame->frame_format, frame->actual_bytes, preview->frameBytes,
frame->width, frame->height, preview->frameWidth, preview->frameHeight);
#endif
return;
}
if (LIKELY(preview->isRunning())) {
uvc_frame_t *copy = preview->get_frame(frame->data_bytes);
if (UNLIKELY(!copy)) {
#if LOCAL_DEBUG
LOGE("uvc_callback:unable to allocate duplicate frame!");
#endif
return;
}
uvc_error_t ret = uvc_duplicate_frame(frame, copy);
if (UNLIKELY(ret)) {
preview->recycle_frame(copy);
return;
}
preview->addPreviewFrame(copy);
}
}
The important step here is duplicating the frame data:
uvc_frame_t *copy = preview->get_frame(frame->data_bytes);
then calling:
uvc_error_t ret = uvc_duplicate_frame(frame, copy);
and then queuing the copied frame for the preview thread:
preview->addPreviewFrame(copy);
Let's analyze these three methods in turn. The first method, preview->get_frame:
/**
* get uvc_frame_t from frame pool
* if pool is empty, create new frame
* this function does not confirm the frame size
* and you may need to confirm the size
*/
uvc_frame_t *UVCPreview::get_frame(size_t data_bytes) {
uvc_frame_t *frame = NULL;
pthread_mutex_lock(&pool_mutex);
{
if (!mFramePool.isEmpty()) {
frame = mFramePool.last();
}
}
pthread_mutex_unlock(&pool_mutex);
if UNLIKELY(!frame) {
LOGW("allocate new frame");
frame = uvc_allocate_frame(data_bytes);
}
return frame;
}
This method takes a frame out of mFramePool (or allocates a new one if the pool is empty) and returns it. mFramePool is declared as ObjectArray<uvc_frame_t *> mFramePool; and uvc_frame_t represents one concrete frame of data, including the data buffer, width, height, and so on. Its definition:
/** An image frame received from the UVC device
* @ingroup streaming
*/
typedef struct uvc_frame {
/** Image data for this frame */
void *data;
/** Size of image data buffer */
size_t data_bytes;
/** XXX Size of actual received data to confirm whether the received bytes is same
* as expected on user function when some microframes dropped */
size_t actual_bytes;
/** Width of image in pixels */
uint32_t width;
/** Height of image in pixels */
uint32_t height;
/** Pixel data format */
enum uvc_frame_format frame_format;
/** Number of bytes per horizontal line (undefined for compressed format) */
size_t step;
/** Frame number (may skip, but is strictly monotonically increasing) */
uint32_t sequence;
/** Estimate of system time when the device started capturing the image */
struct timeval capture_time;
/** Handle on the device that produced the image.
* @warning You must not call any uvc_* functions during a callback. */
uvc_device_handle_t *source;
/** Is the data buffer owned by the library?
* If 1, the data buffer can be arbitrarily reallocated by frame conversion
* functions.
* If 0, the data buffer will not be reallocated or freed by the library.
* Set this field to zero if you are supplying the buffer.
*/
uint8_t library_owns_data;
} uvc_frame_t;
Searching for the uses of mFramePool shows that it is a recycling pool: frames are continually put, cleared, and recycled from it.
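The counterpart of get_frame is recycle_frame, which is not quoted in this post; it presumably returns a frame to the pool, or frees it when the pool already holds enough spares (a hedged sketch along the lines of the UVCPreview code):

// hedged sketch of recycle_frame: return a frame to the pool for reuse,
// or free it when the pool is already full
void UVCPreview::recycle_frame(uvc_frame_t *frame) {
    pthread_mutex_lock(&pool_mutex);
    if (LIKELY(mFramePool.size() < FRAME_POOL_SZ)) {  // FRAME_POOL_SZ: assumed pool capacity
        mFramePool.put(frame);
        frame = NULL;          // the pool owns it now
    }
    pthread_mutex_unlock(&pool_mutex);
    if (UNLIKELY(frame)) {
        uvc_free_frame(frame); // pool full: release the buffer instead
    }
}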
Next, the second method: uvc_duplicate_frame, which lives in frame.c.
/** @brief Duplicate a frame, preserving color format
* @ingroup frame
*
* @param in Original frame
* @param out Duplicate frame
*/
uvc_error_t uvc_duplicate_frame(uvc_frame_t *in, uvc_frame_t *out) {
if (UNLIKELY(uvc_ensure_frame_size(out, in->data_bytes) < 0))
return UVC_ERROR_NO_MEM;
out->width = in->width;
out->height = in->height;
out->frame_format = in->frame_format;
if (out->library_owns_data)
out->step = in->step;
out->sequence = in->sequence;
out->capture_time = in->capture_time;
out->source = in->source;
out->actual_bytes = in->actual_bytes; // XXX
#if USE_STRIDE // XXX
if (in->step && out->step) {
const int istep = in->step;
const int ostep = out->step;
const int hh = in->height < out->height ? in->height : out->height;
const int rowbytes = istep < ostep ? istep : ostep;
register void *ip = in->data;
register void *op = out->data;
int h;
for (h = 0; h < hh; h += 4) {
memcpy(op, ip, rowbytes);
ip += istep; op += ostep;
memcpy(op, ip, rowbytes);
ip += istep; op += ostep;
memcpy(op, ip, rowbytes);
ip += istep; op += ostep;
memcpy(op, ip, rowbytes);
ip += istep; op += ostep;
}
} else {
// compressed format? XXX if only one of the frame in / out has step, this may lead to crash...
memcpy(out->data, in->data, in->actual_bytes);
}
#else
memcpy(out->data, in->data, in->actual_bytes); // XXX
#endif
return UVC_SUCCESS;
}
As you can see, this is the actual copy. If the duplication fails, the calling code recycles the frame, i.e. releases it back for reuse. In uvc_preview_frame_callback:

uvc_error_t ret = uvc_duplicate_frame(frame, copy);
if (UNLIKELY(ret)) {
    preview->recycle_frame(copy);
    return;
}
preview->addPreviewFrame(copy);

If ret is non-zero (i.e. the duplication failed), the copy is recycled and the callback returns early.
Then comes the third method, preview->addPreviewFrame(copy), which queues this frame for the preview thread:
void UVCPreview::addPreviewFrame(uvc_frame_t *frame) {
pthread_mutex_lock(&preview_mutex);
if (isRunning() && (previewFrames.size() < MAX_FRAME)) {
previewFrames.put(frame);
frame = NULL;
pthread_cond_signal(&preview_sync);
}
pthread_mutex_unlock(&preview_mutex);
if (frame) {
recycle_frame(frame);
}
}
As you can see, the frame is put into previewFrames (previewFrames.put(frame);) and preview_sync is signaled to wake the preview thread. If preview is not running or the queue already holds MAX_FRAME entries, the frame is recycled instead, so its buffer can be reused.
Now let's continue with the highlighted methods. Next is capture_thread_func:
/*
* thread function
* @param vptr_args pointer to UVCPreview instance
*/
// static
void *UVCPreview::capture_thread_func(void *vptr_args) {
int result;
ENTER();
UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
if (LIKELY(preview)) {
JavaVM *vm = getVM();
JNIEnv *env;
// attach to JavaVM
vm->AttachCurrentThread(&env, NULL);
preview->do_capture(env); // never return until finish previewing
// detach from JavaVM
vm->DetachCurrentThread();
MARK("DetachCurrentThread");
}
PRE_EXIT();
pthread_exit(NULL);
}
It calls preview->do_capture(env);, which never returns until previewing finishes. Stepping into it:
/**
* the actual function for capturing
*/
void UVCPreview::do_capture(JNIEnv *env) {
ENTER();
clearCaptureFrame();
callbackPixelFormatChanged();
for (; isRunning() ;) {
mIsCapturing = true;
if (mCaptureWindow) {
do_capture_surface(env);
} else {
do_capture_idle_loop(env);
}
pthread_cond_broadcast(&capture_sync);
} // end of for (; isRunning() ;)
EXIT();
}
You can see that if the capture window handle mCaptureWindow exists, the frame is written to it with do_capture_surface(env). A quick aside: how does mCaptureWindow get assigned? It happens in:

int UVCPreview::setCaptureDisplay(ANativeWindow *capture_window) {
    ...
    mCaptureWindow = capture_window;
    ...
}

which is called by UVCCamera::setCaptureDisplay:
int UVCCamera::setCaptureDisplay(ANativeWindow *capture_window) {
ENTER();
int result = EXIT_FAILURE;
if (mPreview) {
result = mPreview->setCaptureDisplay(capture_window);
}
RETURN(result, int);
}
And UVCCamera::setCaptureDisplay is in turn called from UVCCamera.java via

private static final native int nativeSetCaptureDisplay(final long id_camera, final Surface surface);

which is used for video recording; more on that later.
Now back to do_capture_surface(env):
/**
* write frame data to Surface for capturing
*/
void UVCPreview::do_capture_surface(JNIEnv *env) {
ENTER();
uvc_frame_t *frame = NULL;
uvc_frame_t *converted = NULL;
char *local_picture_path;
for (; isRunning() && isCapturing() ;) {
frame = waitCaptureFrame();
if (LIKELY(frame)) {
// frame data is always YUYV format.
if LIKELY(isCapturing()) {
if (UNLIKELY(!converted)) {
converted = get_frame(previewBytes);
}
if (LIKELY(converted)) {
int b = uvc_any2rgbx(frame, converted);
if (!b) {
if (LIKELY(mCaptureWindow)) {
copyToSurface(converted, &mCaptureWindow);
}
}
}
}
do_capture_callback(env, frame);
}
}
if (converted) {
recycle_frame(converted);
}
if (mCaptureWindow) {
ANativeWindow_release(mCaptureWindow);
mCaptureWindow = NULL;
}
EXIT();
}
The core of this method is copyToSurface(converted, &mCaptureWindow);, which writes the converted frame to mCaptureWindow. If the window handle does not exist, do_capture_idle_loop runs instead, looping and dispatching the capture callback while the conditions hold:
void UVCPreview::do_capture_idle_loop(JNIEnv *env) {
ENTER();
for (; isRunning() && isCapturing() ;) {
do_capture_callback(env, waitCaptureFrame());
}
EXIT();
}
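Both capture paths block in waitCaptureFrame(), which this post does not quote. It presumably mirrors waitPreviewFrame, waiting on capture_sync until addCaptureFrame (shown further below) publishes a frame (a hedged sketch along the lines of the UVCPreview code):

// hedged sketch of waitCaptureFrame: block until addCaptureFrame() has
// published a frame into captureQueu, then take ownership of it
uvc_frame_t *UVCPreview::waitCaptureFrame() {
    uvc_frame_t *frame = NULL;
    pthread_mutex_lock(&capture_mutex);
    {
        if (!captureQueu) {
            pthread_cond_wait(&capture_sync, &capture_mutex);
        }
        if (LIKELY(isRunning() && captureQueu)) {
            frame = captureQueu;
            captureQueu = NULL;  // a "keep only the latest" queue of depth one
        }
    }
    pthread_mutex_unlock(&capture_mutex);
    return frame;
}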
Next, back to the highlighted methods:

frame_mjpeg = waitPreviewFrame();

Here is that method:
uvc_frame_t *UVCPreview::waitPreviewFrame() {
uvc_frame_t *frame = NULL;
pthread_mutex_lock(&preview_mutex);
{
if (!previewFrames.size()) {
pthread_cond_wait(&preview_sync, &preview_mutex);
}
if (LIKELY(isRunning() && previewFrames.size() > 0)) {
frame = previewFrames.remove(0);
}
}
pthread_mutex_unlock(&preview_mutex);
return frame;
}
This waits for frames to appear in previewFrames. If a frame has been put into previewFrames, it is removed and returned immediately; if the queue is empty, the thread blocks in

pthread_cond_wait(&preview_sync, &preview_mutex);

until another thread, having produced a new frame, wakes it up, at which point it takes the frame and returns it. (Note the if rather than a while around the wait; the size check that follows guards against spurious wakeups, in which case NULL is simply returned.) So which producer is previewFrames waiting on? It is the UVC frame callback: every time uvc_preview_frame_callback duplicates a new frame, addPreviewFrame puts it into previewFrames and signals preview_sync, which wakes this wait so it can take the frame and return. mFramePool is only the buffer pool those copies are drawn from, not the producer itself.
Continuing with the next highlighted call:

frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);

This takes a destination frame from mFramePool; get_frame was already quoted in full above, in the callback analysis. The requested size, width * height * 2 bytes, is exactly one YUYV buffer.
Note that this call happens only in MJPEG mode; in YUYV mode it is not needed, and the frame comes straight out of waitPreviewFrame:

frame = waitPreviewFrame();

In other words, MJPEG mode performs one extra conversion, MJPEG to YUYV:

result = uvc_mjpeg2yuyv(frame_mjpeg, frame); // MJPEG => yuyv

so from this point on the data is always in YUYV format.
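The buffer sizes used throughout follow directly from the pixel formats: YUYV packs 2 bytes per pixel and RGBX 4, which matches frameBytes = width * height * 2 in prepare_preview and the get_frame(w * h * 2) / get_frame(w * h * 4) calls above. A quick worked check (my own illustration; the resolution is an example):

#include <cstdio>

int main() {
    const int w = 1280, h = 720;                  // example resolution
    printf("YUYV frame: %d bytes\n", w * h * 2);  // 1843200 bytes, 2 bytes/pixel
    printf("RGBX frame: %d bytes\n", w * h * 4);  // 3686400 bytes, 4 bytes/pixel
    return 0;
}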
As mentioned above, UVCPreview::do_preview contains an if-else that selects MJPEG mode or YUYV mode. So which one runs in practice? That is decided by the PREVIEW_MODE passed when the upper layer constructs mCameraHandler:
mCameraHandler = UVCCameraHandler.createHandler(this, mUVCCameraView,
USE_SURFACE_ENCODER ? 0 : 1, PREVIEW_WIDTH, PREVIEW_HEIGHT, PREVIEW_MODE);
Its definition is below: if PREVIEW_MODE is 1 the mode is MJPEG, otherwise YUYV.
/**
* preview mode
* if your camera does not support specific resolution and mode,
* {@link UVCCamera#setPreviewSize(int, int, int)} throw exception
* 0:YUYV, other:MJPEG
*/
private static final int PREVIEW_MODE = 1;
Here the value is 1, i.e. MJPEG mode, so UVCPreview::do_preview normally takes the if branch, the one with the extra conversion.
Then let's continue with the remaining highlighted calls:

frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
addCaptureFrame(frame);

Starting with draw_preview_one:
// changed to return original frame instead of returning converted frame even if convert_func is not null.
uvc_frame_t *UVCPreview::draw_preview_one(uvc_frame_t *frame, ANativeWindow **window, convFunc_t convert_func, int pixcelBytes) {
// ENTER();
int b = 0;
pthread_mutex_lock(&preview_mutex);
{
b = *window != NULL;
}
pthread_mutex_unlock(&preview_mutex);
if (LIKELY(b)) {
uvc_frame_t *converted;
if (convert_func) {
converted = get_frame(frame->width * frame->height * pixcelBytes);
if LIKELY(converted) {
b = convert_func(frame, converted);
if (!b) {
pthread_mutex_lock(&preview_mutex);
copyToSurface(converted, window);
pthread_mutex_unlock(&preview_mutex);
} else {
LOGE("failed converting");
}
recycle_frame(converted);
}
} else {
pthread_mutex_lock(&preview_mutex);
copyToSurface(frame, window);
pthread_mutex_unlock(&preview_mutex);
}
}
return frame; //RETURN(frame, uvc_frame_t *);
}
This method converts and displays: depending on whether convert_func was supplied, it eventually draws the frame onto the ANativeWindow. If a conversion function was passed in, the frame is converted and then displayed with copyToSurface(converted, window); if not, the frame is displayed directly with copyToSurface(frame, window). As the call sites above show, both the if and the else branch of do_preview pass the converter uvc_any2rgbx, so the convert-then-display path is the one taken.
The content of copyToSurface:
// transfer specific frame data to the Surface(ANativeWindow)
int copyToSurface(uvc_frame_t *frame, ANativeWindow **window) {
// ENTER();
int result = 0;
if (LIKELY(*window)) {
ANativeWindow_Buffer buffer;
if (LIKELY(ANativeWindow_lock(*window, &buffer, NULL) == 0)) {
// source = frame data
const uint8_t *src = (uint8_t *)frame->data;
const int src_w = frame->width * PREVIEW_PIXEL_BYTES;
const int src_step = frame->width * PREVIEW_PIXEL_BYTES;
// destination = Surface(ANativeWindow)
uint8_t *dest = (uint8_t *)buffer.bits;
const int dest_w = buffer.width * PREVIEW_PIXEL_BYTES;
const int dest_step = buffer.stride * PREVIEW_PIXEL_BYTES;
// use lower transfer bytes
const int w = src_w < dest_w ? src_w : dest_w;
// use lower height
const int h = frame->height < buffer.height ? frame->height : buffer.height;
// transfer from frame data to the Surface
copyFrame(src, dest, w, h, src_step, dest_step);
ANativeWindow_unlockAndPost(*window);
} else {
result = -1;
}
} else {
result = -1;
}
return result; //RETURN(result, int);
}
Inside, it calls copyFrame, shown below:
static void copyFrame(const uint8_t *src, uint8_t *dest, const int width, int height, const int stride_src, const int stride_dest) {
const int h8 = height % 8;
// copy the leftover rows first (height modulo 8 of them)...
for (int i = 0; i < h8; i++) {
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
}
// ...then the rest, eight rows per iteration (hand-unrolled); starting at h8
// keeps the unrolled body from running past the end of the buffers when
// height is not a multiple of 8
for (int i = h8; i < height; i += 8) {
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
memcpy(dest, src, width);
dest += stride_dest; src += stride_src;
}
}
Back in copyToSurface, ANativeWindow_unlockAndPost(*window); then posts the buffer, which puts the frame on screen.
With draw_preview_one analyzed, the next method is addCaptureFrame(frame):
void UVCPreview::addCaptureFrame(uvc_frame_t *frame) {
pthread_mutex_lock(&capture_mutex);
if (LIKELY(isRunning())) {
// keep only latest one
if (captureQueu) {
recycle_frame(captureQueu);
}
captureQueu = frame;
pthread_cond_broadcast(&capture_sync);
}
pthread_mutex_unlock(&capture_mutex);
}
As you can see, the new frame is stored into captureQueu, which keeps only the latest frame: if an older frame is still sitting in captureQueu, recycle_frame reclaims that older one first, and then capture_sync is broadcast to wake the capture thread.
With that, the analysis of the highlighted do_preview flow is complete.
-------------------------------------------------------------------------------------------
This raises a question: there are two places that draw, so which one actually performs the final drawing? Is it the copyToSurface(converted, &mCaptureWindow); inside UVCPreview::do_capture, or the copyToSurface(converted, window); inside UVCPreview::draw_preview_one? Both end up in the same function, copyToSurface (quoted above), whose key call is ANativeWindow_unlockAndPost(*window);, which performs the actual display. Looking at the parameters: draw_preview_one inside do_preview is invoked as draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);, so its window argument is &mPreviewWindow, while do_capture_surface(env) inside do_capture uses &mCaptureWindow. The two are declared identically:

ANativeWindow *mPreviewWindow; and ANativeWindow *mCaptureWindow;

so they are the same type.
In terms of call flow, UVCPreview::do_capture is called by UVCPreview::capture_thread_func, and UVCPreview::capture_thread_func is started from UVCPreview::do_preview via

if (pthread_create(&capture_thread, NULL, capture_thread_func, (void *)this) == 0) {}

so in principle both paths get to run.
From the upper layer's perspective, though, UVCCamera.java has two methods:
private static final native int nativeSetCaptureDisplay(final long id_camera, final Surface surface);
and
private static final native int nativeStartPreview(final long id_camera);
which correspond to:
int UVCPreview::setCaptureDisplay(ANativeWindow *capture_window) {}
and
int UVCPreview::startPreview() {}
These two native methods are called, respectively, from:
/**
* start movie capturing(this should call while previewing)
* @param surface
*/
public void startCapture(final Surface surface) {
if (mCtrlBlock != null && surface != null) {
nativeSetCaptureDisplay(mNativePtr, surface);
} else
throw new NullPointerException("startCapture");
}
and
/**
* start preview
*/
public synchronized void startPreview() {
if (mCtrlBlock != null) {
if (DEBUG) Log.e("debug", "--------------mUVCCamera startPreview:");
nativeStartPreview(mNativePtr);
}
}
in other words, they are called by startCapture and startPreview respectively.
startCapture is invoked from AbstractUVCCameraHandler's MediaEncoder.MediaEncoderListener mMediaEncoderListener, in onPrepared(final MediaEncoder encoder), which is reached from handleStartRecording(); see:
case MSG_CAPTURE_START:
thread.handleStartRecording();
break;
while startPreview is called from AbstractUVCCameraHandler's handleStartPreview; see:
case MSG_PREVIEW_START:
thread.handleStartPreview(msg.obj);
break;
Both of these run inside handleMessage. MSG_CAPTURE_START is sent where recording starts:

public void startRecording() {
    checkReleased();
    sendEmptyMessage(MSG_CAPTURE_START);
}

while MSG_PREVIEW_START is sent where preview starts:

protected void startPreview(final Object surface) {
    checkReleased();
    if (!((surface instanceof SurfaceHolder) || (surface instanceof Surface) || (surface instanceof SurfaceTexture))) {
        throw new IllegalArgumentException("surface should be one of SurfaceHolder, Surface or SurfaceTexture");
    }
    sendMessage(obtainMessage(MSG_PREVIEW_START, surface));
}

From this we can see that previewing goes through handleStartPreview, while recording goes through handleStartRecording.
So during actual preview it is the latter path that runs continuously, i.e.

frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
addCaptureFrame(frame);

are the calls executed over and over, once per frame.
Of course, this is only my own analysis; if you spot any mistakes, please point them out, thanks.
That roughly covers the UVCCamera preview flow. To be continued...