Overview: avcodec_decode_video2() decodes one frame of video data; it takes a compressed AVPacket structure as input and outputs a decoded AVFrame structure.
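Before looking at the implementation, here is a minimal sketch of how a caller typically drives this deprecated API (the helper name decode_one and the variables avctx and pkt are illustrative, not from the article):

```c
#include <libavcodec/avcodec.h>

/* Minimal sketch of a caller's loop around the deprecated API.
 * Assumes "avctx" was opened with avcodec_open2() and "pkt" was
 * filled by av_read_frame(). */
static int decode_one(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame  = av_frame_alloc();
    int got_picture = 0;
    int ret = avcodec_decode_video2(avctx, frame, &got_picture, pkt);

    if (ret >= 0 && got_picture) {
        /* "frame" now holds one decoded picture; consume it here */
    }
    av_frame_free(&frame);
    return ret; /* bytes consumed on success, negative AVERROR on failure */
}
```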
avcodec_decode_video2
```c
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                                              int *got_picture_ptr,
                                              const AVPacket *avpkt)
{
    return compat_decode(avctx, picture, got_picture_ptr, avpkt);
}

static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
                         int *got_frame, const AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    av_assert0(avci->compat_decode_consumed == 0);

    *got_frame = 0;
    avci->compat_decode = 1;

    if (avci->compat_decode_partial_size > 0 &&
        avci->compat_decode_partial_size != pkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "Got unexpected packet size after a partial decode\n");
        ret = AVERROR(EINVAL);
        goto finish;
    }

    if (!avci->compat_decode_partial_size) {
        ret = avcodec_send_packet(avctx, pkt);
        if (ret == AVERROR_EOF)
            ret = 0;
        else if (ret == AVERROR(EAGAIN)) {
            /* we fully drain all the output in each decode call, so this should not
             * ever happen */
            ret = AVERROR_BUG;
            goto finish;
        } else if (ret < 0)
            goto finish;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            goto finish;
        }

        if (frame != avci->compat_decode_frame) {
            if (!avctx->refcounted_frames) {
                ret = unrefcount_frame(avci, frame);
                if (ret < 0)
                    goto finish;
            }

            *got_frame = 1;
            frame = avci->compat_decode_frame;
        } else {
            if (!avci->compat_decode_warned) {
                av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
                       "API cannot return all the frames for this decoder. "
                       "Some frames will be dropped. Update your code to the "
                       "new decoding API to fix this.\n");
                avci->compat_decode_warned = 1;
            }
        }

        if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
            break;
    }

finish:
    if (ret == 0) {
        /* if there are any bsfs then assume full packet is always consumed */
        if (avctx->codec->bsfs)
            ret = pkt->size;
        else
            ret = FFMIN(avci->compat_decode_consumed, pkt->size);
    }
    avci->compat_decode_consumed = 0;
    avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;

    return ret;
}
```
avcodec_decode_video2() actually calls compat_decode(), which does the real decoding through avcodec_send_packet() and obtains the decoded AVFrame through avcodec_receive_frame().
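In other words, compat_decode() is a compatibility shim over the newer send/receive API. For reference, a minimal sketch of the loop an application would write directly against that API (decode_packet is an illustrative helper name; avctx, pkt, and frame are assumed to be set up by the caller):

```c
#include <libavcodec/avcodec.h>

/* Sketch of the loop an application writes directly against the new API;
 * compat_decode() reproduces this pattern internally. */
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);      /* feed one compressed packet */
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);  /* one packet may yield 0..N frames */
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;       /* need more input, or stream fully drained */
        if (ret < 0)
            return ret;     /* genuine decoding error */
        /* use frame here, then release our reference */
        av_frame_unref(frame);
    }
    return 0;
}
```

The implementation of avcodec_send_packet() follows.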
```c
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avctx->internal->draining)
        return AVERROR_EOF;

    if (avpkt && !avpkt->size && avpkt->data)
        return AVERROR(EINVAL);

    ret = bsfs_init(avctx);
    if (ret < 0)
        return ret;

    av_packet_unref(avci->buffer_pkt);
    if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
        ret = av_packet_ref(avci->buffer_pkt, avpkt);
        if (ret < 0)
            return ret;
    }

    ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
    if (ret < 0) {
        av_packet_unref(avci->buffer_pkt);
        return ret;
    }

    if (!avci->buffer_frame->buf[0]) {
        ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}
```
Inside avcodec_send_packet(), decode_receive_frame_internal() is called:
```c
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_assert0(!frame->buf[0]);

    if (avctx->codec->receive_frame)
        ret = avctx->codec->receive_frame(avctx, frame);
    else
        ret = decode_simple_receive_frame(avctx, frame);

    if (ret == AVERROR_EOF)
        avci->draining_done = 1;

    return ret;
}

static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int ret;

    while (!frame->buf[0]) {
        ret = decode_simple_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    return 0;
}

/*
 * The core of the receive_frame_wrapper for the decoders implementing
 * the simple API. Certain decoders might consume partial packets without
 * returning any output, so this function needs to be called in a loop until it
 * returns EAGAIN.
 **/
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal     *avci = avctx->internal;
    DecodeSimpleContext *ds   = &avci->ds;
    AVPacket            *pkt  = ds->in_pkt;
    // copy to ensure we do not change pkt
    AVPacket tmp;
    int got_frame, actual_got_frame, did_split;
    int ret;

    if (!pkt->data && !avci->draining) {
        av_packet_unref(pkt);
        ret = ff_decode_get_packet(avctx, pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    // Some codecs (at least wma lossless) will crash when feeding drain packets
    // after EOF was signaled.
    if (avci->draining_done)
        return AVERROR_EOF;

    if (!pkt->data &&
        !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
          avctx->active_thread_type & FF_THREAD_FRAME))
        return AVERROR_EOF;

    tmp = *pkt;
#if FF_API_MERGE_SD
FF_DISABLE_DEPRECATION_WARNINGS
    did_split = avci->compat_decode_partial_size ?
                ff_packet_split_and_drop_side_data(&tmp) :
                av_packet_split_side_data(&tmp);

    if (did_split) {
        ret = extract_packet_props(avctx->internal, &tmp);
        if (ret < 0)
            return ret;

        ret = apply_param_change(avctx, &tmp);
        if (ret < 0)
            return ret;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    got_frame = 0;

    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
        ret = ff_thread_decode_frame(avctx, frame, &got_frame, &tmp);
    } else {
        ret = avctx->codec->decode(avctx, frame, &got_frame, &tmp);

        if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
            frame->pkt_dts = pkt->dts;
        if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
            if (!avctx->has_b_frames)
                frame->pkt_pos = pkt->pos;
            //FIXME these should be under if(!avctx->has_b_frames)
            /* get_buffer is supposed to set frame parameters */
            if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
                if (!frame->sample_aspect_ratio.num)  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
                if (!frame->width)                    frame->width               = avctx->width;
                if (!frame->height)                   frame->height              = avctx->height;
                if (frame->format == AV_PIX_FMT_NONE) frame->format              = avctx->pix_fmt;
            }
        }
    }
    emms_c();
    actual_got_frame = got_frame;

    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        if (frame->flags & AV_FRAME_FLAG_DISCARD)
            got_frame = 0;

        if (got_frame)
            frame->best_effort_timestamp = guess_correct_pts(avctx,
                                                             frame->pts,
                                                             frame->pkt_dts);
    } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
        uint8_t *side;
        int side_size;
        uint32_t discard_padding = 0;
        uint8_t skip_reason = 0;
        uint8_t discard_reason = 0;

        if (ret >= 0 && got_frame) {
            frame->best_effort_timestamp = guess_correct_pts(avctx,
                                                             frame->pts,
                                                             frame->pkt_dts);
            if (frame->format == AV_SAMPLE_FMT_NONE)
                frame->format = avctx->sample_fmt;
            if (!frame->channel_layout)
                frame->channel_layout = avctx->channel_layout;
            if (!frame->channels)
                frame->channels = avctx->channels;
            if (!frame->sample_rate)
                frame->sample_rate = avctx->sample_rate;
        }

        side = av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
        if (side && side_size >= 10) {
            avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
            discard_padding = AV_RL32(side + 4);
            av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
                   avctx->internal->skip_samples, (int)discard_padding);
            skip_reason    = AV_RL8(side + 8);
            discard_reason = AV_RL8(side + 9);
        }

        if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
            !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
            avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
            got_frame = 0;
        }

        if (avctx->internal->skip_samples > 0 && got_frame &&
            !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
            if (frame->nb_samples <= avctx->internal->skip_samples) {
                got_frame = 0;
                avctx->internal->skip_samples -= frame->nb_samples;
                av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
                       avctx->internal->skip_samples);
            } else {
                av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
                                frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
                if (avctx->pkt_timebase.num && avctx->sample_rate) {
                    int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
                                                   (AVRational){1, avctx->sample_rate},
                                                   avctx->pkt_timebase);
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts += diff_ts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
                    if (frame->pkt_pts != AV_NOPTS_VALUE)
                        frame->pkt_pts += diff_ts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
                    if (frame->pkt_dts != AV_NOPTS_VALUE)
                        frame->pkt_dts += diff_ts;
                    if (frame->pkt_duration >= diff_ts)
                        frame->pkt_duration -= diff_ts;
                } else {
                    av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
                }
                av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
                       avctx->internal->skip_samples, frame->nb_samples);
                frame->nb_samples -= avctx->internal->skip_samples;
                avctx->internal->skip_samples = 0;
            }
        }

        if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
            !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
            if (discard_padding == frame->nb_samples) {
                got_frame = 0;
            } else {
                if (avctx->pkt_timebase.num && avctx->sample_rate) {
                    int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
                                                   (AVRational){1, avctx->sample_rate},
                                                   avctx->pkt_timebase);
                    frame->pkt_duration = diff_ts;
                } else {
                    av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
                }
                av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
                       (int)discard_padding, frame->nb_samples);
                frame->nb_samples -= discard_padding;
            }
        }

        if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
            AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
            if (fside) {
                AV_WL32(fside->data, avctx->internal->skip_samples);
                AV_WL32(fside->data + 4, discard_padding);
                AV_WL8(fside->data + 8, skip_reason);
                AV_WL8(fside->data + 9, discard_reason);
                avctx->internal->skip_samples = 0;
            }
        }
    }
#if FF_API_MERGE_SD
    if (did_split) {
        av_packet_free_side_data(&tmp);
        if (ret == tmp.size)
            ret = pkt->size;
    }
#endif

    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
        !avci->showed_multi_packet_warning &&
        ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
        av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
        avci->showed_multi_packet_warning = 1;
    }

    if (!got_frame)
        av_frame_unref(frame);

    if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
        ret = pkt->size;

#if FF_API_AVCTX_TIMEBASE
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
        avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
#endif

    /* do not stop draining when actual_got_frame != 0 or ret < 0 */
    /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
    if (avctx->internal->draining && !actual_got_frame) {
        if (ret < 0) {
            /* prevent infinite loop if a decoder wrongly always return error on draining */
            /* reasonable nb_errors_max = maximum b frames + thread count */
            int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
                                avctx->thread_count : 1);

            if (avci->nb_draining_errors++ >= nb_errors_max) {
                av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
                       "Stop draining and force EOF.\n");
                avci->draining_done = 1;
                ret = AVERROR_BUG;
            }
        } else {
            avci->draining_done = 1;
        }
    }

    avci->compat_decode_consumed += ret;

    if (ret >= pkt->size || ret < 0) {
        av_packet_unref(pkt);
    } else {
        int consumed = ret;

        pkt->data                  += consumed;
        pkt->size                  -= consumed;
        avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
        pkt->pts                    = AV_NOPTS_VALUE;
        pkt->dts                    = AV_NOPTS_VALUE;
        avci->last_pkt_props->pts   = AV_NOPTS_VALUE;
        avci->last_pkt_props->dts   = AV_NOPTS_VALUE;
    }

    if (got_frame)
        av_assert0(frame->buf[0]);

    return ret < 0 ? ret : 0;
}
```
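Note the draining logic at the end of decode_simple_internal(): once the caller has signaled end of stream, the decoder keeps returning buffered frames until AVERROR_EOF. From the application side this corresponds to the standard flush sequence, sketched below (flush_decoder is an illustrative helper name, assuming the same avctx and frame as above):

```c
#include <libavcodec/avcodec.h>

/* Flush sketch: sending a NULL packet switches the decoder into draining
 * mode; receive_frame is then called until it stops returning frames. */
static void flush_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    avcodec_send_packet(avctx, NULL);   /* enter draining mode */
    for (;;) {
        int ret = avcodec_receive_frame(avctx, frame);
        if (ret < 0)
            break;                      /* AVERROR_EOF once all buffered frames are out */
        /* use the remaining buffered frame */
        av_frame_unref(frame);
    }
}
```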
The key step in this chain of calls is ret = avctx->codec->decode(avctx, frame, &got_frame, &tmp), which invokes the AVCodec's decode() callback to perform the actual decoding. decode is a function pointer; for H.264 it is set in the ff_h264_decoder table:
```c
AVCodec ff_h264_decoder = {
    .name                  = "h264",
    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_H264,
    .priv_data_size        = sizeof(H264Context),
    .init                  = h264_decode_init,
    .close                 = h264_decode_end,
    .decode                = h264_decode_frame,
    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                             AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
    .flush                 = flush_dpb,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
    .priv_class            = &h264_class,
};
```
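This table is what the generic layer dispatches through. As an illustrative sketch (not from the article's code; open_h264_decoder is a hypothetical helper), looking the decoder up by ID resolves to exactly this struct:

```c
#include <libavcodec/avcodec.h>

static AVCodecContext *open_h264_decoder(void)
{
    /* avcodec_find_decoder() walks the registered decoder list and, for
     * AV_CODEC_ID_H264, returns &ff_h264_decoder shown above. */
    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;
    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    /* avctx->codec->decode now points at h264_decode_frame() */
    return avctx;
}
```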
As ff_h264_decoder shows, the decode pointer is set to h264_decode_frame():
```c
static int h264_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H264Context *h     = avctx->priv_data;
    AVFrame *pict      = data;
    int buf_index;
    int ret;

    h->flags = avctx->flags;
    h->setup_finished = 0;
    h->nb_slice_ctx_queued = 0;

    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    /* end of stream, output what is still in the buffers */
    if (buf_size == 0)
        return send_next_delayed_frame(h, pict, got_frame, 0);

    if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
        int side_size;
        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
        if (is_extra(side, side_size))
            ff_h264_decode_extradata(side, side_size,
                                     &h->ps, &h->is_avc, &h->nal_length_size,
                                     avctx->err_recognition, avctx);
    }
    if (h->is_avc && buf_size >= 9 && buf[0] == 1 && buf[2] == 0 &&
        (buf[4] & 0xFC) == 0xFC && (buf[5] & 0x1F) && buf[8] == 0x67) {
        if (is_extra(buf, buf_size))
            return ff_h264_decode_extradata(buf, buf_size,
                                            &h->ps, &h->is_avc, &h->nal_length_size,
                                            avctx->err_recognition, avctx);
    }

    buf_index = decode_nal_units(h, buf, buf_size);
    if (buf_index < 0)
        return AVERROR_INVALIDDATA;

    if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
        av_assert0(buf_index <= buf_size);
        return send_next_delayed_frame(h, pict, got_frame, buf_index);
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
        if (avctx->skip_frame >= AVDISCARD_NONREF ||
            buf_size >= 4 && !memcmp("Q264", buf, 4))
            return buf_size;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
            return ret;

        /* Wait for second field. */
        if (h->next_output_pic) {
            ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
            if (ret < 0)
                return ret;
        }
    }

    av_assert0(pict->buf[0] || !*got_frame);

    ff_h264_unref_picture(h, &h->last_pic_for_ec);

    return get_consumed_bytes(buf_index, buf_size);
}
```
h264_decode_frame() then calls decode_nal_units() to carry out the actual decoding.
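To recap, the complete call chain for decoding one H.264 frame through the deprecated API is:

avcodec_decode_video2()
→ compat_decode()
→ avcodec_send_packet()
→ decode_receive_frame_internal()
→ decode_simple_receive_frame()
→ decode_simple_internal()
→ avctx->codec->decode(), i.e. h264_decode_frame()
→ decode_nal_units()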