ffplay(2.0.1)中的音视频同步

时间:2023-03-08 21:41:47

最近在看ffmpeg相关的一些东西,以及一些播放器相关资料和代码。

然后对于ffmpeg-2.0.1版本下的ffplay进行了大概的代码阅读,其中这里把里面的音视频同步,按个人的理解,暂时在这里作个笔记。

在ffplay2.0.1版本里面,视频的刷新不再直接使用SDL里面的定时器了,而是在主的循环中event_loop中,通过调用函数refresh_loop_wait_event来等待事件,

同时在这个refresh_loop_wait_event函数里面,通过使用休眠函数av_usleep 来进行定时刷新视频。

调用视频更新的代码:

 /*
 * Pump the SDL event queue and drive periodic video refresh while no
 * user event is pending. Replaces the SDL timer used by older ffplay
 * versions: the wait is implemented with av_usleep() instead.
 *
 * is:    player state
 * event: out-parameter; filled with the first pending SDL event, which
 *        terminates the loop so the caller (event_loop) can handle it.
 */
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
    double remaining_time = 0.0;
    SDL_PumpEvents(); /* update SDL's internal event state */
    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) { /* check the event queue for messages */
        /* auto-hide the mouse cursor after CURSOR_HIDE_DELAY of inactivity */
        if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
            SDL_ShowCursor(0);
            cursor_hidden = 1;
        }
        if (remaining_time > 0.0)
            av_usleep((int64_t)(remaining_time * 1000000.0)); /* sleep instead of the old timer-based refresh */
        remaining_time = REFRESH_RATE; /* default polling interval (10 ms); video_refresh may shorten it */
        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
            video_refresh(is, &remaining_time);
        SDL_PumpEvents();
    }
}

然后接下来,我们来看看video_refresh函数里面做了些什么事情吧!

代码如下:

 /* called to display each frame */
static void video_refresh(void *opaque, double *remaining_time)
{
VideoState *is = opaque;
VideoPicture *vp;
double time; SubPicture *sp, *sp2; if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)/*如果用外部时钟同步的话*/
check_external_clock_speed(is); if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
time = av_gettime() / 1000000.0;
if (is->force_refresh || is->last_vis_time + rdftspeed < time) {/*强制刷新视频*/
video_display(is);
is->last_vis_time = time;/*记录本次的时间*/
}
*remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
} if (is->video_st) {
int redisplay = ;
if (is->force_refresh)
redisplay = pictq_prev_picture(is);
retry:
if (is->pictq_size == ) {/*如果缓冲区没有数据*/
SDL_LockMutex(is->pictq_mutex);
if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
is->frame_last_dropped_pts = AV_NOPTS_VALUE;
}
SDL_UnlockMutex(is->pictq_mutex);
// nothing to do, no picture to display in the queue
} else {
double last_duration, duration, delay;
/* dequeue the picture */
vp = &is->pictq[is->pictq_rindex]; if (vp->serial != is->videoq.serial) {
pictq_next_picture(is);
redisplay = ;
goto retry;
} if (is->paused)
goto display; 49 /* compute nominal last_duration *//*通过计算当前要显示的帧和上一帧pts的差来预测当期帧显示时间---预测--->下一帧的到来时间*/
50 last_duration = vp->pts - is->frame_last_pts;/*计算上一帧的显示时间(名义上)*/
51 if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {/*判断上一帧显示的时间是否在范围内*/
52 /* if duration of the last frame was sane, update last_duration in video state */
53 is->frame_last_duration = last_duration;/*更新一帧的持续显示时间*/
54 }
55 if (redisplay)
56 delay = 0.0;
57 else
58 delay = compute_target_delay(is->frame_last_duration, is);/*通过上一帧的情况来预测本次的情况,这样可以得到下一帧的到来时间*/
59
60 time= av_gettime()/1000000.0;
61 if (time < is->frame_timer + delay && !redisplay) {/**/
62 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
63 return;
64 }
65
66 is->frame_timer += delay;
67 if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
68 is->frame_timer = time;
69
70 SDL_LockMutex(is->pictq_mutex);
71 if (!redisplay && !isnan(vp->pts))
72 update_video_pts(is, vp->pts, vp->pos, vp->serial);/*更新当前帧pts和pos*/
73 SDL_UnlockMutex(is->pictq_mutex);
74
75 if (is->pictq_size > 1) {/*如果缓冲中帧数比较多的时候,例如下一帧也已经到了*/
76 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
77 duration = nextvp->pts - vp->pts;/*这个时候,应该用已经在缓存中的下一帧pts-当前pts来真实计算当前持续显示时间*/
78 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){/*如果延迟时间超过一帧了,就采取丢掉当前帧*/
79 if (!redisplay)
80 is->frame_drops_late++;
81 pictq_next_picture(is);/*采取丢帧策略,丢弃迟来的帧,取下一帧*/
82 redisplay = 0;
83 goto retry;
84 }
} if (is->subtitle_st) {
while (is->subpq_size > ) {
sp = &is->subpq[is->subpq_rindex]; if (is->subpq_size > )
sp2 = &is->subpq[(is->subpq_rindex + ) % SUBPICTURE_QUEUE_SIZE];
else
sp2 = NULL; if (sp->serial != is->subtitleq.serial
|| (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / )))
|| (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / ))))
{
free_subpicture(sp); /* update queue size and signal for next picture */
if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
is->subpq_rindex = ; SDL_LockMutex(is->subpq_mutex);
is->subpq_size--;
SDL_CondSignal(is->subpq_cond);
SDL_UnlockMutex(is->subpq_mutex);
} else {
break;
}
}
} display:
/* display picture */
if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
video_display(is); pictq_next_picture(is); if (is->step && !is->paused)
stream_toggle_pause(is);
}
}
is->force_refresh = ;
if (show_status) {/*显示状态*/
static int64_t last_time;
int64_t cur_time;
int aqsize, vqsize, sqsize;
double av_diff; cur_time = av_gettime();
if (!last_time || (cur_time - last_time) >= ) {
aqsize = ;
vqsize = ;
sqsize = ;
if (is->audio_st)
aqsize = is->audioq.size;
if (is->video_st)
vqsize = is->videoq.size;
if (is->subtitle_st)
sqsize = is->subtitleq.size;
av_diff = ;
if (is->audio_st && is->video_st)
av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
else if (is->video_st)
av_diff = get_master_clock(is) - get_clock(&is->vidclk);
else if (is->audio_st)
av_diff = get_master_clock(is) - get_clock(&is->audclk);
av_log(NULL, AV_LOG_INFO,
"%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is),
(is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
av_diff,
is->frame_drops_early + is->frame_drops_late,
aqsize / ,
vqsize / ,
sqsize,
is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : ,
is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : );
fflush(stdout);
last_time = cur_time;
}
}
}

首先说明一下,这里在ffplay里面默认模式是使用音频做主时钟源。

其中上面 video_refresh 函数里计算 last_duration 并调用 compute_target_delay 的那段代码是主要的同步策略:

  他通过计算当前这一帧vp->pts和前面那一帧的pts之差来得到上一帧的显示时间。

  然后再根据这个上面计算得到的上一帧的显示时间来估算预测计算当前这一帧的显示时间,这样就可以得到预测下一帧的pts时间了。

  这里预测下一帧的出现时间,刷新时间,调用了compute_target_delay来进行处理:

代码如下:compute_target_delay

 /*
 * Adjust the nominal inter-frame delay so video follows the master clock.
 *
 * delay: nominal duration of the last frame (seconds)
 * is:    player state
 * Returns the corrected delay to apply before showing the next frame.
 */
static double compute_target_delay(double delay, VideoState *is)
{
    /* NOTE: diff is initialized so the av_dlog below does not read an
     * uninitialized value when video itself is the master clock (the
     * original code left it uninitialized in that branch). */
    double sync_threshold, diff = 0;

    /* update delay to follow master synchronisation source */
    if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_clock(&is->vidclk) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
        if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
            if (diff <= -sync_threshold) {
                /* video is behind the master clock: shrink the delay
                 * (never below zero) to catch up */
                delay = FFMAX(0, delay + diff);
            } else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD) {
                /* video is ahead, but the frame duration exceeds the
                 * duplication threshold: doubling the delay would stall
                 * too long, so just add the difference instead */
                delay = delay + diff;
            } else if (diff >= sync_threshold) {
                /* video is ahead: double the delay (i.e. effectively
                 * repeat the frame) to let the master clock catch up */
                delay = 2 * delay;
            }
        }
    }

    av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
            delay, -diff);

    return delay;
}

  在这个函数里面通过得到视频时间和参考时间(主时钟)之间的差值diff,然后再结合diff的情况来处理delay。