Libav
h264.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #include "libavutil/avassert.h"
29 #include "libavutil/display.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/stereo3d.h"
32 #include "libavutil/timer.h"
33 #include "internal.h"
34 #include "cabac.h"
35 #include "cabac_functions.h"
36 #include "error_resilience.h"
37 #include "avcodec.h"
38 #include "h264.h"
39 #include "h264data.h"
40 #include "h264chroma.h"
41 #include "h264_mvpred.h"
42 #include "golomb.h"
43 #include "mathops.h"
44 #include "me_cmp.h"
45 #include "mpegutils.h"
46 #include "rectangle.h"
47 #include "svq3.h"
48 #include "thread.h"
49 
50 #include <assert.h>
51 
52 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
53 
54 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
55  int (*mv)[2][4][2],
56  int mb_x, int mb_y, int mb_intra, int mb_skipped)
57 {
58  H264Context *h = opaque;
59 
60  h->mb_x = mb_x;
61  h->mb_y = mb_y;
62  h->mb_xy = mb_x + mb_y * h->mb_stride;
63  memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
64  assert(ref >= 0);
65  /* FIXME: It is possible albeit uncommon that slice references
66  * differ between slices. We take the easy approach and ignore
67  * it for now. If this turns out to have any relevance in
68  * practice then correct remapping should be added. */
69  if (ref >= h->ref_count[0])
70  ref = 0;
71  fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
72  2, 2, 2, ref, 1);
73  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
74  fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
75  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
76  assert(!FRAME_MBAFF(h));
77  ff_h264_hl_decode_mb(h);
78 }
79 
80 void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
81 {
82  AVCodecContext *avctx = h->avctx;
83  AVFrame *cur = &h->cur_pic.f;
84  AVFrame *last = h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0].f : NULL;
85  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
86  int vshift = desc->log2_chroma_h;
87  const int field_pic = h->picture_structure != PICT_FRAME;
88  if (field_pic) {
89  height <<= 1;
90  y <<= 1;
91  }
92 
93  height = FFMIN(height, avctx->height - y);
94 
95  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
96  return;
97 
98  if (avctx->draw_horiz_band) {
99  AVFrame *src;
100  int offset[AV_NUM_DATA_POINTERS];
101  int i;
102 
103  if (cur->pict_type == AV_PICTURE_TYPE_B || h->low_delay ||
104  (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
105  src = cur;
106  else if (last)
107  src = last;
108  else
109  return;
110 
111  offset[0] = y * src->linesize[0];
112  offset[1] =
113  offset[2] = (y >> vshift) * src->linesize[1];
114  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
115  offset[i] = 0;
116 
117  emms_c();
118 
119  avctx->draw_horiz_band(avctx, src, offset,
120  y, h->picture_structure, height);
121  }
122 }
123 
124 /**
125  * Check if the top & left blocks are available if needed and
126  * change the dc mode so it only uses the available blocks.
127  */
128 int ff_h264_check_intra4x4_pred_mode(H264Context *h)
129 {
130  static const int8_t top[12] = {
131  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
132  };
133  static const int8_t left[12] = {
134  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
135  };
136  int i;
137 
138  if (!(h->top_samples_available & 0x8000)) {
139  for (i = 0; i < 4; i++) {
140  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
141  if (status < 0) {
142  av_log(h->avctx, AV_LOG_ERROR,
143  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
144  status, h->mb_x, h->mb_y);
145  return AVERROR_INVALIDDATA;
146  } else if (status) {
147  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
148  }
149  }
150  }
151 
152  if ((h->left_samples_available & 0x8888) != 0x8888) {
153  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
154  for (i = 0; i < 4; i++)
155  if (!(h->left_samples_available & mask[i])) {
156  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
157  if (status < 0) {
158  av_log(h->avctx, AV_LOG_ERROR,
159  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
160  status, h->mb_x, h->mb_y);
161  return AVERROR_INVALIDDATA;
162  } else if (status) {
163  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
164  }
165  }
166  }
167 
168  return 0;
169 } // FIXME cleanup like ff_h264_check_intra_pred_mode
170 
171 /**
172  * Check if the top & left blocks are available if needed and
173  * change the dc mode so it only uses the available blocks.
174  */
175 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
176 {
177  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
178  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
179 
180  if (mode > 3U) {
181  av_log(h->avctx, AV_LOG_ERROR,
182  "out of range intra chroma pred mode at %d %d\n",
183  h->mb_x, h->mb_y);
184  return AVERROR_INVALIDDATA;
185  }
186 
187  if (!(h->top_samples_available & 0x8000)) {
188  mode = top[mode];
189  if (mode < 0) {
190  av_log(h->avctx, AV_LOG_ERROR,
191  "top block unavailable for requested intra mode at %d %d\n",
192  h->mb_x, h->mb_y);
193  return AVERROR_INVALIDDATA;
194  }
195  }
196 
197  if ((h->left_samples_available & 0x8080) != 0x8080) {
198  mode = left[mode];
199  if (is_chroma && (h->left_samples_available & 0x8080)) {
200  // mad cow disease mode, aka MBAFF + constrained_intra_pred
201  mode = ALZHEIMER_DC_L0T_PRED8x8 +
202  (!(h->left_samples_available & 0x8000)) +
203  2 * (mode == DC_128_PRED8x8);
204  }
205  if (mode < 0) {
206  av_log(h->avctx, AV_LOG_ERROR,
207  "left block unavailable for requested intra mode at %d %d\n",
208  h->mb_x, h->mb_y);
209  return AVERROR_INVALIDDATA;
210  }
211  }
212 
213  return mode;
214 }
215 
216 const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
217  int *dst_length, int *consumed, int length)
218 {
219  int i, si, di;
220  uint8_t *dst;
221  int bufidx;
222 
223  // src[0]&0x80; // forbidden bit
224  h->nal_ref_idc = src[0] >> 5;
225  h->nal_unit_type = src[0] & 0x1F;
226 
227  src++;
228  length--;
229 
230 #define STARTCODE_TEST \
231  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
232  if (src[i + 2] != 3) { \
233  /* startcode, so we must be past the end */ \
234  length = i; \
235  } \
236  break; \
237  }
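/* Note: STARTCODE_TEST is expanded where a zero byte has just been found; it
 * checks the following two bytes. 00 00 followed by 0, 1 or 2 is a real start
 * code (or damaged data), so the payload ends at i; 00 00 03 is the
 * emulation-prevention escape, so scanning stops here and the unescaping loop
 * further down strips the 03. */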
238 
239 #if HAVE_FAST_UNALIGNED
240 #define FIND_FIRST_ZERO \
241  if (i > 0 && !src[i]) \
242  i--; \
243  while (src[i]) \
244  i++
245 
246 #if HAVE_FAST_64BIT
247  for (i = 0; i + 1 < length; i += 9) {
248  if (!((~AV_RN64A(src + i) &
249  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
250  0x8000800080008080ULL))
251  continue;
252  FIND_FIRST_ZERO;
253  STARTCODE_TEST;
254  i -= 7;
255  }
256 #else
257  for (i = 0; i + 1 < length; i += 5) {
258  if (!((~AV_RN32A(src + i) &
259  (AV_RN32A(src + i) - 0x01000101U)) &
260  0x80008080U))
261  continue;
262  FIND_FIRST_ZERO;
263  STARTCODE_TEST;
264  i -= 3;
265  }
266 #endif
267 #else
268  for (i = 0; i + 1 < length; i += 2) {
269  if (src[i])
270  continue;
271  if (i > 0 && src[i - 1] == 0)
272  i--;
273  STARTCODE_TEST;
274  }
275 #endif
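/* Note: on HAVE_FAST_UNALIGNED targets the loops above read 4 or 8 bytes at a
 * time and apply a SWAR-style zero-byte test -- (~v & (v - lows)) & highs is
 * non-zero only if one of the probed byte lanes is zero -- so most of the
 * input is skipped wordwise; FIND_FIRST_ZERO and STARTCODE_TEST then pin down
 * the exact 00 00 0x position. */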
276 
277  if (i >= length - 1) { // no escaped 0
278  *dst_length = length;
279  *consumed = length + 1; // +1 for the header
280  return src;
281  }
282 
283  // use second escape buffer for inter data
284  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
285  av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
286  length + FF_INPUT_BUFFER_PADDING_SIZE);
287  dst = h->rbsp_buffer[bufidx];
288 
289  if (!dst)
290  return NULL;
291 
292  memcpy(dst, src, i);
293  si = di = i;
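/* Note: the loop below undoes H.264 emulation prevention. Inside a NAL the
 * encoder rewrites 00 00 00/01/02/03 as 00 00 03 0x, so a plain 00 00 03 here
 * means "drop the 03 and keep the two zero bytes", while 00 00 followed by
 * 0, 1 or 2 can only be a real start code ending this NAL. */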
294  while (si + 2 < length) {
295  // remove escapes (very rare 1:2^22)
296  if (src[si + 2] > 3) {
297  dst[di++] = src[si++];
298  dst[di++] = src[si++];
299  } else if (src[si] == 0 && src[si + 1] == 0) {
300  if (src[si + 2] == 3) { // escape
301  dst[di++] = 0;
302  dst[di++] = 0;
303  si += 3;
304  continue;
305  } else // next start code
306  goto nsc;
307  }
308 
309  dst[di++] = src[si++];
310  }
311  while (si < length)
312  dst[di++] = src[si++];
313 
314 nsc:
315  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
316 
317  *dst_length = di;
318  *consumed = si + 1; // +1 for the header
319  /* FIXME store exact number of bits in the getbitcontext
320  * (it is needed for decoding) */
321  return dst;
322 }
323 
324 /**
325  * Identify the exact end of the bitstream
326  * @return the length of the trailing, or 0 if damaged
327  */
328 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
329 {
330  int v = *src;
331  int r;
332 
333  tprintf(h->avctx, "rbsp trailing %X\n", v);
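/* Note: the last RBSP byte is a stop bit (1) followed only by zero padding,
 * so the 1-based position of the lowest set bit gives the number of trailing
 * bits to discard: 0x80 -> 8 (drop the whole byte), 0x01 -> 1.
 * get_bit_length() subtracts this value from 8 * dst_length. */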
334 
335  for (r = 1; r < 9; r++) {
336  if (v & 1)
337  return r;
338  v >>= 1;
339  }
340  return 0;
341 }
342 
343 void ff_h264_free_tables(H264Context *h, int free_rbsp)
344 {
345  int i;
346  H264Context *hx;
347 
350  av_freep(&h->cbp_table);
351  av_freep(&h->mvd_table[0]);
352  av_freep(&h->mvd_table[1]);
353  av_freep(&h->direct_table);
356  h->slice_table = NULL;
357  av_freep(&h->list_counts);
358 
359  av_freep(&h->mb2b_xy);
360  av_freep(&h->mb2br_xy);
361 
366 
367  if (free_rbsp && h->DPB) {
368  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
369  ff_h264_unref_picture(h, &h->DPB[i]);
370  av_freep(&h->DPB);
371  } else if (h->DPB) {
372  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
373  h->DPB[i].needs_realloc = 1;
374  }
375 
376  h->cur_pic_ptr = NULL;
377 
378  for (i = 0; i < H264_MAX_THREADS; i++) {
379  hx = h->thread_context[i];
380  if (!hx)
381  continue;
382  av_freep(&hx->top_borders[1]);
383  av_freep(&hx->top_borders[0]);
386  av_freep(&hx->dc_val_base);
387  av_freep(&hx->er.mb_index2xy);
389  av_freep(&hx->er.er_temp_buffer);
390  av_freep(&hx->er.mbintra_table);
391  av_freep(&hx->er.mbskip_table);
392 
393  if (free_rbsp) {
394  av_freep(&hx->rbsp_buffer[1]);
395  av_freep(&hx->rbsp_buffer[0]);
396  hx->rbsp_buffer_size[0] = 0;
397  hx->rbsp_buffer_size[1] = 0;
398  }
399  if (i)
400  av_freep(&h->thread_context[i]);
401  }
402 }
403 
404 int ff_h264_alloc_tables(H264Context *h)
405 {
406  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
407  const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count;
408  int x, y, i;
409 
411  row_mb_num * 8 * sizeof(uint8_t), fail)
413  big_mb_num * 48 * sizeof(uint8_t), fail)
415  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
417  big_mb_num * sizeof(uint16_t), fail)
419  big_mb_num * sizeof(uint8_t), fail)
421  16 * row_mb_num * sizeof(uint8_t), fail);
423  16 * row_mb_num * sizeof(uint8_t), fail);
425  4 * big_mb_num * sizeof(uint8_t), fail);
427  big_mb_num * sizeof(uint8_t), fail)
428 
429  memset(h->slice_table_base, -1,
430  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
431  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
432 
434  big_mb_num * sizeof(uint32_t), fail);
436  big_mb_num * sizeof(uint32_t), fail);
437  for (y = 0; y < h->mb_height; y++)
438  for (x = 0; x < h->mb_width; x++) {
439  const int mb_xy = x + y * h->mb_stride;
440  const int b_xy = 4 * x + 4 * y * h->b_stride;
441 
442  h->mb2b_xy[mb_xy] = b_xy;
443  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
444  }
445 
446  if (!h->dequant4_coeff[0])
448 
449  if (!h->DPB) {
450  h->DPB = av_mallocz_array(H264_MAX_PICTURE_COUNT, sizeof(*h->DPB));
451  if (!h->DPB)
452  return AVERROR(ENOMEM);
453  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
454  av_frame_unref(&h->DPB[i].f);
455  av_frame_unref(&h->cur_pic.f);
456  }
457 
458  return 0;
459 
460 fail:
461  ff_h264_free_tables(h, 1);
462  return AVERROR(ENOMEM);
463 }
464 
470 {
471  ERContext *er = &h->er;
472  int mb_array_size = h->mb_height * h->mb_stride;
473  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
474  int c_size = h->mb_stride * (h->mb_height + 1);
475  int yc_size = y_size + 2 * c_size;
476  int x, y, i;
477 
479  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
481  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
482 
483  h->ref_cache[0][scan8[5] + 1] =
484  h->ref_cache[0][scan8[7] + 1] =
485  h->ref_cache[0][scan8[13] + 1] =
486  h->ref_cache[1][scan8[5] + 1] =
487  h->ref_cache[1][scan8[7] + 1] =
488  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
489 
491  /* init ER */
492  er->avctx = h->avctx;
493  er->mecc = &h->mecc;
494  er->decode_mb = h264_er_decode_mb;
495  er->opaque = h;
496  er->quarter_sample = 1;
497 
498  er->mb_num = h->mb_num;
499  er->mb_width = h->mb_width;
500  er->mb_height = h->mb_height;
501  er->mb_stride = h->mb_stride;
502  er->b8_stride = h->mb_width * 2 + 1;
503 
504  // error resilience code looks cleaner with this
506  (h->mb_num + 1) * sizeof(int), fail);
507 
508  for (y = 0; y < h->mb_height; y++)
509  for (x = 0; x < h->mb_width; x++)
510  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
511 
512  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
513  h->mb_stride + h->mb_width;
514 
516  mb_array_size * sizeof(uint8_t), fail);
517 
518  FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
519  memset(er->mbintra_table, 1, mb_array_size);
520 
521  FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
522 
524  h->mb_height * h->mb_stride, fail);
525 
527  yc_size * sizeof(int16_t), fail);
528  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
529  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
530  er->dc_val[2] = er->dc_val[1] + c_size;
531  for (i = 0; i < yc_size; i++)
532  h->dc_val_base[i] = 1024;
533  }
534 
535  return 0;
536 
537 fail:
538  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
539 }
540 
541 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
542  int parse_extradata);
543 
544 int ff_h264_decode_extradata(H264Context *h)
545 {
546  AVCodecContext *avctx = h->avctx;
547  int ret;
548 
549  if (avctx->extradata[0] == 1) {
550  int i, cnt, nalsize;
551  unsigned char *p = avctx->extradata;
552 
553  h->is_avc = 1;
554 
555  if (avctx->extradata_size < 7) {
556  av_log(avctx, AV_LOG_ERROR,
557  "avcC %d too short\n", avctx->extradata_size);
558  return AVERROR_INVALIDDATA;
559  }
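/* Note: the extradata parsed below is an ISO/IEC 14496-15 avcC record:
 * byte 0 configurationVersion, 1 profile, 2 profile compatibility, 3 level,
 * byte 4 low 2 bits = lengthSizeMinusOne (size of the per-NAL length field
 * minus one), byte 5 low 5 bits = SPS count, each SPS prefixed by a 16-bit
 * length, then one byte of PPS count followed by the PPS entries. */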
560  /* sps and pps in the avcC always have length coded with 2 bytes,
561  * so put a fake nal_length_size = 2 while parsing them */
562  h->nal_length_size = 2;
563  // Decode sps from avcC
564  cnt = *(p + 5) & 0x1f; // Number of sps
565  p += 6;
566  for (i = 0; i < cnt; i++) {
567  nalsize = AV_RB16(p) + 2;
568  if (p - avctx->extradata + nalsize > avctx->extradata_size)
569  return AVERROR_INVALIDDATA;
570  ret = decode_nal_units(h, p, nalsize, 1);
571  if (ret < 0) {
572  av_log(avctx, AV_LOG_ERROR,
573  "Decoding sps %d from avcC failed\n", i);
574  return ret;
575  }
576  p += nalsize;
577  }
578  // Decode pps from avcC
579  cnt = *(p++); // Number of pps
580  for (i = 0; i < cnt; i++) {
581  nalsize = AV_RB16(p) + 2;
582  if (p - avctx->extradata + nalsize > avctx->extradata_size)
583  return AVERROR_INVALIDDATA;
584  ret = decode_nal_units(h, p, nalsize, 1);
585  if (ret < 0) {
586  av_log(avctx, AV_LOG_ERROR,
587  "Decoding pps %d from avcC failed\n", i);
588  return ret;
589  }
590  p += nalsize;
591  }
592  // Store right nal length size that will be used to parse all other nals
593  h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
594  } else {
595  h->is_avc = 0;
596  ret = decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1);
597  if (ret < 0)
598  return ret;
599  }
600  return 0;
601 }
602 
603 av_cold int ff_h264_decode_init(AVCodecContext *avctx)
604 {
605  H264Context *h = avctx->priv_data;
606  int i;
607  int ret;
608 
609  h->avctx = avctx;
610 
611  h->bit_depth_luma = 8;
612  h->chroma_format_idc = 1;
613 
614  ff_h264dsp_init(&h->h264dsp, 8, 1);
615  ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
616  ff_h264qpel_init(&h->h264qpel, 8);
617  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
618 
619  h->dequant_coeff_pps = -1;
620 
621  /* needed so that IDCT permutation is known early */
623  ff_me_cmp_init(&h->mecc, h->avctx);
624  ff_videodsp_init(&h->vdsp, 8);
625 
626  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
627  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
628 
629  h->picture_structure = PICT_FRAME;
630  h->slice_context_count = 1;
631  h->workaround_bugs = avctx->workaround_bugs;
632  h->flags = avctx->flags;
633 
634  /* set defaults */
635  // s->decode_mb = ff_h263_decode_mb;
636  if (!avctx->has_b_frames)
637  h->low_delay = 1;
638 
639  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
640 
641  ff_h264_decode_init_vlc();
642 
643  ff_init_cabac_states();
644 
645  h->pixel_shift = 0;
646  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
647 
648  h->thread_context[0] = h;
649  h->outputed_poc = h->next_outputed_poc = INT_MIN;
650  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
651  h->last_pocs[i] = INT_MIN;
652  h->prev_poc_msb = 1 << 16;
653  h->x264_build = -1;
655  h->recovery_frame = -1;
656  h->frame_recovered = 0;
657  if (avctx->codec_id == AV_CODEC_ID_H264) {
658  if (avctx->ticks_per_frame == 1)
659  h->avctx->time_base.den *= 2;
660  avctx->ticks_per_frame = 2;
661  }
662 
663  if (avctx->extradata_size > 0 && avctx->extradata) {
664  ret = ff_h264_decode_extradata(h);
665  if (ret < 0)
666  return ret;
667  }
668 
669  if (h->sps.bitstream_restriction_flag &&
670  h->avctx->has_b_frames < h->sps.num_reorder_frames) {
671  h->avctx->has_b_frames = h->sps.num_reorder_frames;
672  h->low_delay = 0;
673  }
674 
675  avctx->internal->allocate_progress = 1;
676 
677  return 0;
678 }
679 
680 static int decode_init_thread_copy(AVCodecContext *avctx)
681 {
682  H264Context *h = avctx->priv_data;
683 
684  if (!avctx->internal->is_copy)
685  return 0;
686  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
687  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
688 
689  h->context_initialized = 0;
690 
691  return 0;
692 }
693 
702 static void decode_postinit(H264Context *h, int setup_finished)
703 {
704  H264Picture *out = h->cur_pic_ptr;
705  H264Picture *cur = h->cur_pic_ptr;
706  int i, pics, out_of_order, out_idx;
707  int invalid = 0, cnt = 0;
708 
709  h->cur_pic_ptr->f.pict_type = h->pict_type;
710 
711  if (h->next_output_pic)
712  return;
713 
714  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
715  /* FIXME: if we have two PAFF fields in one packet, we can't start
716  * the next thread here. If we have one field per packet, we can.
717  * The check in decode_nal_units() is not good enough to find this
718  * yet, so we assume the worst for now. */
719  // if (setup_finished)
720  // ff_thread_finish_setup(h->avctx);
721  return;
722  }
723 
724  cur->f.interlaced_frame = 0;
725  cur->f.repeat_pict = 0;
726 
727  /* Signal interlacing information externally. */
728  /* Prioritize picture timing SEI information over used
729  * decoding process if it exists. */
730 
731  if (h->sps.pic_struct_present_flag) {
732  switch (h->sei_pic_struct) {
734  break;
737  cur->f.interlaced_frame = 1;
738  break;
741  if (FIELD_OR_MBAFF_PICTURE(h))
742  cur->f.interlaced_frame = 1;
743  else
744  // try to flag soft telecine progressive
746  break;
749  /* Signal the possibility of telecined film externally
750  * (pic_struct 5,6). From these hints, let the applications
751  * decide if they apply deinterlacing. */
752  cur->f.repeat_pict = 1;
753  break;
755  cur->f.repeat_pict = 2;
756  break;
758  cur->f.repeat_pict = 4;
759  break;
760  }
761 
762  if ((h->sei_ct_type & 3) &&
764  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
765  } else {
766  /* Derive interlacing flag from used decoding process. */
768  }
770 
771  if (cur->field_poc[0] != cur->field_poc[1]) {
772  /* Derive top_field_first from field pocs. */
773  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
774  } else {
776  /* Use picture timing SEI information. Even if it is
777  * information from a past frame, better than nothing. */
780  cur->f.top_field_first = 1;
781  else
782  cur->f.top_field_first = 0;
783  } else {
784  /* Most likely progressive */
785  cur->f.top_field_first = 0;
786  }
787  }
788 
789  if (h->sei_frame_packing_present &&
794  AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f);
795  if (!stereo)
796  return;
797 
798  switch (h->frame_packing_arrangement_type) {
799  case 0:
800  stereo->type = AV_STEREO3D_CHECKERBOARD;
801  break;
802  case 1:
803  stereo->type = AV_STEREO3D_COLUMNS;
804  break;
805  case 2:
806  stereo->type = AV_STEREO3D_LINES;
807  break;
808  case 3:
809  if (h->quincunx_subsampling)
810  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
811  else
812  stereo->type = AV_STEREO3D_SIDEBYSIDE;
813  break;
814  case 4:
815  stereo->type = AV_STEREO3D_TOPBOTTOM;
816  break;
817  case 5:
818  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
819  break;
820  case 6:
821  stereo->type = AV_STEREO3D_2D;
822  break;
823  }
824 
825  if (h->content_interpretation_type == 2)
826  stereo->flags = AV_STEREO3D_FLAG_INVERT;
827  }
828 
831  double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
832  AVFrameSideData *rotation = av_frame_new_side_data(&cur->f,
833  AV_FRAME_DATA_DISPLAYMATRIX,
834  sizeof(int32_t) * 9);
835  if (!rotation)
836  return;
837 
838  av_display_rotation_set((int32_t *)rotation->data, angle);
839  av_display_matrix_flip((int32_t *)rotation->data,
840  h->sei_vflip, h->sei_hflip);
841  }
842 
843  // FIXME do something with unavailable reference frames
844 
845  /* Sort B-frames into display order */
846 
850  h->low_delay = 0;
851  }
852 
856  h->low_delay = 0;
857  }
858 
859  pics = 0;
860  while (h->delayed_pic[pics])
861  pics++;
862 
863  assert(pics <= MAX_DELAYED_PIC_COUNT);
864 
865  h->delayed_pic[pics++] = cur;
866  if (cur->reference == 0)
867  cur->reference = DELAYED_PIC_REF;
868 
869  /* Frame reordering. This code takes pictures from coding order and sorts
870  * them by their incremental POC value into display order. It supports POC
871  * gaps, MMCO reset codes and random resets.
872  * A "display group" can start either with an IDR frame (f.key_frame = 1),
873  * and/or can be closed down with an MMCO reset code. In sequences where
874  * there is no delay, we can't detect that (since the frame was already
875  * output to the user), so we also set h->mmco_reset to detect the MMCO
876  * reset code.
877  * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
878  * we increase the delay between input and output. All frames affected by
879  * the lag (e.g. those that should have been output before another frame
880  * that we already returned to the user) will be dropped. This is a bug
881  * that we will fix later. */
882  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
883  cnt += out->poc < h->last_pocs[i];
884  invalid += out->poc == INT_MIN;
885  }
886  if (!h->mmco_reset && !cur->f.key_frame &&
887  cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
888  h->mmco_reset = 2;
889  if (pics > 1)
890  h->delayed_pic[pics - 2]->mmco_reset = 2;
891  }
892  if (h->mmco_reset || cur->f.key_frame) {
893  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
894  h->last_pocs[i] = INT_MIN;
895  cnt = 0;
896  invalid = MAX_DELAYED_PIC_COUNT;
897  }
898  out = h->delayed_pic[0];
899  out_idx = 0;
900  for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
901  h->delayed_pic[i] &&
902  !h->delayed_pic[i - 1]->mmco_reset &&
903  !h->delayed_pic[i]->f.key_frame;
904  i++)
905  if (h->delayed_pic[i]->poc < out->poc) {
906  out = h->delayed_pic[i];
907  out_idx = i;
908  }
909  if (h->avctx->has_b_frames == 0 &&
910  (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
911  h->next_outputed_poc = INT_MIN;
912  out_of_order = !out->f.key_frame && !h->mmco_reset &&
913  (out->poc < h->next_outputed_poc);
914 
917  } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
918  h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
919  if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
920  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
921  }
922  h->low_delay = 0;
923  } else if (h->low_delay &&
924  ((h->next_outputed_poc != INT_MIN &&
925  out->poc > h->next_outputed_poc + 2) ||
926  cur->f.pict_type == AV_PICTURE_TYPE_B)) {
927  h->low_delay = 0;
928  h->avctx->has_b_frames++;
929  }
930 
931  if (pics > h->avctx->has_b_frames) {
932  out->reference &= ~DELAYED_PIC_REF;
933  // for frame threading, the owner must be the second field's thread or
934  // else the first thread can release the picture and reuse it unsafely
935  for (i = out_idx; h->delayed_pic[i]; i++)
936  h->delayed_pic[i] = h->delayed_pic[i + 1];
937  }
938  memmove(h->last_pocs, &h->last_pocs[1],
939  sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
940  h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
941  if (!out_of_order && pics > h->avctx->has_b_frames) {
942  h->next_output_pic = out;
943  if (out->mmco_reset) {
944  if (out_idx > 0) {
945  h->next_outputed_poc = out->poc;
946  h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
947  } else {
948  h->next_outputed_poc = INT_MIN;
949  }
950  } else {
951  if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
952  h->next_outputed_poc = INT_MIN;
953  } else {
954  h->next_outputed_poc = out->poc;
955  }
956  }
957  h->mmco_reset = 0;
958  } else {
959  av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
960  }
961 
962  if (h->next_output_pic) {
963  if (h->next_output_pic->recovered) {
964  // We have reached a recovery point and all frames after it in
965  // display order are "recovered".
967  }
969  }
970 
971  if (setup_finished && !h->avctx->hwaccel)
972  ff_thread_finish_setup(h->avctx);
973 }
974 
975 int ff_pred_weight_table(H264Context *h)
976 {
977  int list, i;
978  int luma_def, chroma_def;
979 
980  h->use_weight = 0;
981  h->use_weight_chroma = 0;
982  h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
983  if (h->sps.chroma_format_idc)
984  h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
985  luma_def = 1 << h->luma_log2_weight_denom;
986  chroma_def = 1 << h->chroma_log2_weight_denom;
987 
988  for (list = 0; list < 2; list++) {
989  h->luma_weight_flag[list] = 0;
990  h->chroma_weight_flag[list] = 0;
991  for (i = 0; i < h->ref_count[list]; i++) {
992  int luma_weight_flag, chroma_weight_flag;
993 
994  luma_weight_flag = get_bits1(&h->gb);
995  if (luma_weight_flag) {
996  h->luma_weight[i][list][0] = get_se_golomb(&h->gb);
997  h->luma_weight[i][list][1] = get_se_golomb(&h->gb);
998  if (h->luma_weight[i][list][0] != luma_def ||
999  h->luma_weight[i][list][1] != 0) {
1000  h->use_weight = 1;
1001  h->luma_weight_flag[list] = 1;
1002  }
1003  } else {
1004  h->luma_weight[i][list][0] = luma_def;
1005  h->luma_weight[i][list][1] = 0;
1006  }
1007 
1008  if (h->sps.chroma_format_idc) {
1009  chroma_weight_flag = get_bits1(&h->gb);
1010  if (chroma_weight_flag) {
1011  int j;
1012  for (j = 0; j < 2; j++) {
1013  h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
1014  h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
1015  if (h->chroma_weight[i][list][j][0] != chroma_def ||
1016  h->chroma_weight[i][list][j][1] != 0) {
1017  h->use_weight_chroma = 1;
1018  h->chroma_weight_flag[list] = 1;
1019  }
1020  }
1021  } else {
1022  int j;
1023  for (j = 0; j < 2; j++) {
1024  h->chroma_weight[i][list][j][0] = chroma_def;
1025  h->chroma_weight[i][list][j][1] = 0;
1026  }
1027  }
1028  }
1029  }
1030  if (h->slice_type_nos != AV_PICTURE_TYPE_B)
1031  break;
1032  }
1033  h->use_weight = h->use_weight || h->use_weight_chroma;
1034  return 0;
1035 }
1036 
1040 static void idr(H264Context *h)
1041 {
1042  ff_h264_remove_all_refs(h);
1043  h->prev_frame_num =
1044  h->prev_frame_num_offset =
1045  h->prev_poc_msb =
1046  h->prev_poc_lsb = 0;
1047 }
1048 
1049 /* forget old pics after a seek */
1050 void ff_h264_flush_change(H264Context *h)
1051 {
1052  int i;
1053  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1054  h->last_pocs[i] = INT_MIN;
1055  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1056  h->prev_interlaced_frame = 1;
1057  idr(h);
1058  if (h->cur_pic_ptr)
1059  h->cur_pic_ptr->reference = 0;
1060  h->first_field = 0;
1061  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
1062  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
1063  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
1064  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
1065  ff_h264_reset_sei(h);
1066  h->recovery_frame = -1;
1067  h->frame_recovered = 0;
1068 }
1069 
1070 /* forget old pics after a seek */
1071 static void flush_dpb(AVCodecContext *avctx)
1072 {
1073  H264Context *h = avctx->priv_data;
1074  int i;
1075 
1076  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
1077  if (h->delayed_pic[i])
1078  h->delayed_pic[i]->reference = 0;
1079  h->delayed_pic[i] = NULL;
1080  }
1081 
1082  ff_h264_flush_change(h);
1083 
1084  if (h->DPB)
1085  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
1086  ff_h264_unref_picture(h, &h->DPB[i]);
1087  h->cur_pic_ptr = NULL;
1088  ff_h264_unref_picture(h, &h->cur_pic);
1089 
1090  h->mb_x = h->mb_y = 0;
1091 
1092  h->parse_context.state = -1;
1093  h->parse_context.frame_start_found = 0;
1094  h->parse_context.overread = 0;
1095  h->parse_context.overread_index = 0;
1096  h->parse_context.index = 0;
1097  h->parse_context.last_index = 0;
1098 
1099  ff_h264_free_tables(h, 1);
1100  h->context_initialized = 0;
1101 }
1102 
1103 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
1104 {
1105  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
1106  int field_poc[2];
1107 
1108  h->frame_num_offset = h->prev_frame_num_offset;
1109  if (h->frame_num < h->prev_frame_num)
1110  h->frame_num_offset += max_frame_num;
1111 
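/* Note: the three branches below follow the spec's pic_order_cnt_type modes:
 * type 0 transmits the POC LSBs and infers MSB wrap-around from the previous
 * reference picture, type 1 derives the POC from frame_num and the
 * offset_for_ref_frame[] cycle signalled in the SPS, and type 2 uses
 * 2 * (frame_num_offset + frame_num), minus one for non-reference pictures. */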
1112  if (h->sps.poc_type == 0) {
1113  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
1114 
1115  if (h->poc_lsb < h->prev_poc_lsb &&
1116  h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
1117  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
1118  else if (h->poc_lsb > h->prev_poc_lsb &&
1119  h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
1120  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
1121  else
1122  h->poc_msb = h->prev_poc_msb;
1123  field_poc[0] =
1124  field_poc[1] = h->poc_msb + h->poc_lsb;
1125  if (h->picture_structure == PICT_FRAME)
1126  field_poc[1] += h->delta_poc_bottom;
1127  } else if (h->sps.poc_type == 1) {
1128  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
1129  int i;
1130 
1131  if (h->sps.poc_cycle_length != 0)
1132  abs_frame_num = h->frame_num_offset + h->frame_num;
1133  else
1134  abs_frame_num = 0;
1135 
1136  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
1137  abs_frame_num--;
1138 
1139  expected_delta_per_poc_cycle = 0;
1140  for (i = 0; i < h->sps.poc_cycle_length; i++)
1141  // FIXME integrate during sps parse
1142  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
1143 
1144  if (abs_frame_num > 0) {
1145  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
1146  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
1147 
1148  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
1149  for (i = 0; i <= frame_num_in_poc_cycle; i++)
1150  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
1151  } else
1152  expectedpoc = 0;
1153 
1154  if (h->nal_ref_idc == 0)
1155  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
1156 
1157  field_poc[0] = expectedpoc + h->delta_poc[0];
1158  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
1159 
1160  if (h->picture_structure == PICT_FRAME)
1161  field_poc[1] += h->delta_poc[1];
1162  } else {
1163  int poc = 2 * (h->frame_num_offset + h->frame_num);
1164 
1165  if (!h->nal_ref_idc)
1166  poc--;
1167 
1168  field_poc[0] = poc;
1169  field_poc[1] = poc;
1170  }
1171 
1172  if (h->picture_structure != PICT_BOTTOM_FIELD)
1173  pic_field_poc[0] = field_poc[0];
1174  if (h->picture_structure != PICT_TOP_FIELD)
1175  pic_field_poc[1] = field_poc[1];
1176  *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
1177 
1178  return 0;
1179 }
1180 
1188 int ff_h264_get_profile(SPS *sps)
1189 {
1190  int profile = sps->profile_idc;
1191 
1192  switch (sps->profile_idc) {
1194  // constraint_set1_flag set to 1
1195  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
1196  break;
1200  // constraint_set3_flag set to 1
1201  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
1202  break;
1203  }
1204 
1205  return profile;
1206 }
1207 
1208 static int h264_set_parameter_from_sps(H264Context *h)
1209 {
1210  if (h->flags & CODEC_FLAG_LOW_DELAY ||
1211  (h->sps.bitstream_restriction_flag &&
1212  !h->sps.num_reorder_frames)) {
1213  if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
1214  av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
1215  "Reenabling low delay requires a codec flush.\n");
1216  else
1217  h->low_delay = 1;
1218  }
1219 
1220  if (h->avctx->has_b_frames < 2)
1221  h->avctx->has_b_frames = !h->low_delay;
1222 
1223  if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
1225  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
1228  h->pixel_shift = h->sps.bit_depth_luma > 8;
1229 
1231  h->sps.chroma_format_idc);
1235  h->sps.chroma_format_idc);
1237  ff_me_cmp_init(&h->mecc, h->avctx);
1239  } else {
1240  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
1241  h->sps.bit_depth_luma);
1242  return AVERROR_INVALIDDATA;
1243  }
1244  }
1245  return 0;
1246 }
1247 
1248 int ff_set_ref_count(H264Context *h)
1249 {
1250  int ref_count[2], list_count;
1251  int num_ref_idx_active_override_flag, max_refs;
1252 
1253  // set defaults, might be overridden a few lines later
1254  ref_count[0] = h->pps.ref_count[0];
1255  ref_count[1] = h->pps.ref_count[1];
1256 
1257  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
1260  num_ref_idx_active_override_flag = get_bits1(&h->gb);
1261 
1262  if (num_ref_idx_active_override_flag) {
1263  ref_count[0] = get_ue_golomb(&h->gb) + 1;
1264  if (ref_count[0] < 1)
1265  return AVERROR_INVALIDDATA;
1266  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
1267  ref_count[1] = get_ue_golomb(&h->gb) + 1;
1268  if (ref_count[1] < 1)
1269  return AVERROR_INVALIDDATA;
1270  }
1271  }
1272 
1273  if (h->slice_type_nos == AV_PICTURE_TYPE_B)
1274  list_count = 2;
1275  else
1276  list_count = 1;
1277  } else {
1278  list_count = 0;
1279  ref_count[0] = ref_count[1] = 0;
1280  }
1281 
1282  max_refs = h->picture_structure == PICT_FRAME ? 16 : 32;
1283 
1284  if (ref_count[0] > max_refs || ref_count[1] > max_refs) {
1285  av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n");
1286  h->ref_count[0] = h->ref_count[1] = 0;
1287  return AVERROR_INVALIDDATA;
1288  }
1289 
1290  if (list_count != h->list_count ||
1291  ref_count[0] != h->ref_count[0] ||
1292  ref_count[1] != h->ref_count[1]) {
1293  h->ref_count[0] = ref_count[0];
1294  h->ref_count[1] = ref_count[1];
1295  h->list_count = list_count;
1296  return 1;
1297  }
1298 
1299  return 0;
1300 }
1301 
1302 static int find_start_code(const uint8_t *buf, int buf_size,
1303  int buf_index, int next_avc)
1304 {
1305  // start code prefix search
1306  for (; buf_index + 3 < next_avc; buf_index++)
1307  // This should always succeed in the first iteration.
1308  if (buf[buf_index] == 0 &&
1309  buf[buf_index + 1] == 0 &&
1310  buf[buf_index + 2] == 1)
1311  break;
1312 
1313  if (buf_index + 3 >= buf_size)
1314  return buf_size;
1315 
1316  return buf_index + 3;
1317 }
1318 
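/* Note: get_avc_nalsize() below serves "AVC"/ISO-style streams (h->is_avc),
 * where each NAL unit is prefixed by a 1-4 byte big-endian length whose width
 * comes from the avcC lengthSizeMinusOne field; Annex B byte streams instead
 * rely on the 00 00 01 start codes located by find_start_code() above. */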
1319 static int get_avc_nalsize(H264Context *h, const uint8_t *buf,
1320  int buf_size, int *buf_index)
1321 {
1322  int i, nalsize = 0;
1323 
1324  if (*buf_index >= buf_size - h->nal_length_size)
1325  return -1;
1326 
1327  for (i = 0; i < h->nal_length_size; i++)
1328  nalsize = (nalsize << 8) | buf[(*buf_index)++];
1329  if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
1330  av_log(h->avctx, AV_LOG_ERROR,
1331  "AVC: nal size %d\n", nalsize);
1332  return -1;
1333  }
1334  return nalsize;
1335 }
1336 
1337 static int get_bit_length(H264Context *h, const uint8_t *buf,
1338  const uint8_t *ptr, int dst_length,
1339  int i, int next_avc)
1340 {
1341  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
1342  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
1343  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
1344  h->workaround_bugs |= FF_BUG_TRUNCATED;
1345 
1346  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
1347  while (dst_length > 0 && ptr[dst_length - 1] == 0)
1348  dst_length--;
1349 
1350  if (!dst_length)
1351  return 0;
1352 
1353  return 8 * dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1);
1354 }
1355 
1356 static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
1357 {
1358  int next_avc = h->is_avc ? 0 : buf_size;
1359  int nal_index = 0;
1360  int buf_index = 0;
1361  int nals_needed = 0;
1362 
1363  while(1) {
1364  int nalsize = 0;
1365  int dst_length, bit_length, consumed;
1366  const uint8_t *ptr;
1367 
1368  if (buf_index >= next_avc) {
1369  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1370  if (nalsize < 0)
1371  break;
1372  next_avc = buf_index + nalsize;
1373  } else {
1374  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1375  if (buf_index >= buf_size)
1376  break;
1377  }
1378 
1379  ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length, &consumed,
1380  next_avc - buf_index);
1381 
1382  if (!ptr || dst_length < 0)
1383  return AVERROR_INVALIDDATA;
1384 
1385  buf_index += consumed;
1386 
1387  bit_length = get_bit_length(h, buf, ptr, dst_length,
1388  buf_index, next_avc);
1389  nal_index++;
1390 
1391  /* packets can sometimes contain multiple PPS/SPS,
1392  * e.g. two PAFF field pictures in one packet, or a demuxer
1393  * which splits NALs strangely. If so, when frame threading we
1394  * can't start the next thread until we've read all of them */
1395  switch (h->nal_unit_type) {
1396  case NAL_SPS:
1397  case NAL_PPS:
1398  nals_needed = nal_index;
1399  break;
1400  case NAL_DPA:
1401  case NAL_IDR_SLICE:
1402  case NAL_SLICE:
1403  init_get_bits(&h->gb, ptr, bit_length);
1404  if (!get_ue_golomb(&h->gb))
1405  nals_needed = nal_index;
1406  }
1407  }
1408 
1409  return nals_needed;
1410 }
1411 
1412 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1413  int parse_extradata)
1414 {
1415  AVCodecContext *const avctx = h->avctx;
1416  H264Context *hx;
1417  int buf_index;
1418  unsigned context_count;
1419  int next_avc;
1420  int nals_needed = 0;
1421  int nal_index;
1422  int ret = 0;
1423 
1425  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
1426  h->current_slice = 0;
1427  if (!h->first_field)
1428  h->cur_pic_ptr = NULL;
1429  ff_h264_reset_sei(h);
1430  }
1431 
1432  if (avctx->active_thread_type & FF_THREAD_FRAME)
1433  nals_needed = get_last_needed_nal(h, buf, buf_size);
1434 
1435  {
1436  buf_index = 0;
1437  context_count = 0;
1438  next_avc = h->is_avc ? 0 : buf_size;
1439  nal_index = 0;
1440  for (;;) {
1441  int consumed;
1442  int dst_length;
1443  int bit_length;
1444  const uint8_t *ptr;
1445  int nalsize = 0;
1446  int err;
1447 
1448  if (buf_index >= next_avc) {
1449  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1450  if (nalsize < 0)
1451  break;
1452  next_avc = buf_index + nalsize;
1453  } else {
1454  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1455  if (buf_index >= buf_size)
1456  break;
1457  }
1458 
1459  hx = h->thread_context[context_count];
1460 
1461  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
1462  &consumed, next_avc - buf_index);
1463  if (!ptr || dst_length < 0) {
1464  ret = -1;
1465  goto end;
1466  }
1467 
1468  bit_length = get_bit_length(h, buf, ptr, dst_length,
1469  buf_index + consumed, next_avc);
1470 
1471  if (h->avctx->debug & FF_DEBUG_STARTCODE)
1472  av_log(h->avctx, AV_LOG_DEBUG,
1473  "NAL %d at %d/%d length %d\n",
1474  hx->nal_unit_type, buf_index, buf_size, dst_length);
1475 
1476  if (h->is_avc && (nalsize != consumed) && nalsize)
1477  av_log(h->avctx, AV_LOG_DEBUG,
1478  "AVC: Consumed only %d bytes instead of %d\n",
1479  consumed, nalsize);
1480 
1481  buf_index += consumed;
1482  nal_index++;
1483 
1484  if (avctx->skip_frame >= AVDISCARD_NONREF &&
1485  h->nal_ref_idc == 0 &&
1486  h->nal_unit_type != NAL_SEI)
1487  continue;
1488 
1489 again:
1490  /* Ignore every NAL unit type except PPS and SPS during extradata
1491  * parsing. Decoding slices is not possible in codec init
1492  * with frame-mt */
1493  if (parse_extradata && HAVE_THREADS &&
1495  (hx->nal_unit_type != NAL_PPS &&
1496  hx->nal_unit_type != NAL_SPS)) {
1497  if (hx->nal_unit_type < NAL_AUD ||
1498  hx->nal_unit_type > NAL_AUXILIARY_SLICE)
1499  av_log(avctx, AV_LOG_INFO,
1500  "Ignoring NAL unit %d during extradata parsing\n",
1501  hx->nal_unit_type);
1502  continue;
1503  }
1504  err = 0;
1505  switch (hx->nal_unit_type) {
1506  case NAL_IDR_SLICE:
1507  if (h->nal_unit_type != NAL_IDR_SLICE) {
1508  av_log(h->avctx, AV_LOG_ERROR,
1509  "Invalid mix of idr and non-idr slices\n");
1510  ret = -1;
1511  goto end;
1512  }
1513  idr(h); // FIXME ensure we don't lose some frames if there is reordering
1514  case NAL_SLICE:
1515  init_get_bits(&hx->gb, ptr, bit_length);
1516  hx->intra_gb_ptr =
1517  hx->inter_gb_ptr = &hx->gb;
1518  hx->data_partitioning = 0;
1519 
1520  if ((err = ff_h264_decode_slice_header(hx, h)))
1521  break;
1522 
1523  if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
1524  h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
1525  ((1 << h->sps.log2_max_frame_num) - 1);
1526  }
1527 
1528  h->cur_pic_ptr->f.key_frame |=
1529  (hx->nal_unit_type == NAL_IDR_SLICE) ||
1530  (h->sei_recovery_frame_cnt >= 0);
1531 
1532  if (hx->nal_unit_type == NAL_IDR_SLICE ||
1533  h->recovery_frame == h->frame_num) {
1534  h->recovery_frame = -1;
1535  h->cur_pic_ptr->recovered = 1;
1536  }
1537  // If we have an IDR, all frames after it in decoded order are
1538  // "recovered".
1539  if (hx->nal_unit_type == NAL_IDR_SLICE)
1540  h->frame_recovered |= FRAME_RECOVERED_IDR;
1542 
1543  if (h->current_slice == 1) {
1544  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
1545  decode_postinit(h, nal_index >= nals_needed);
1546 
1547  if (h->avctx->hwaccel &&
1548  (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
1549  return ret;
1550  }
1551 
1552  if (hx->redundant_pic_count == 0 &&
1553  (avctx->skip_frame < AVDISCARD_NONREF ||
1554  hx->nal_ref_idc) &&
1555  (avctx->skip_frame < AVDISCARD_BIDIR ||
1556  hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
1557  (avctx->skip_frame < AVDISCARD_NONKEY ||
1558  hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
1559  avctx->skip_frame < AVDISCARD_ALL) {
1560  if (avctx->hwaccel) {
1561  ret = avctx->hwaccel->decode_slice(avctx,
1562  &buf[buf_index - consumed],
1563  consumed);
1564  if (ret < 0)
1565  return ret;
1566  } else
1567  context_count++;
1568  }
1569  break;
1570  case NAL_DPA:
1571  if (h->avctx->flags & CODEC_FLAG2_CHUNKS) {
1572  av_log(h->avctx, AV_LOG_ERROR,
1573  "Decoding in chunks is not supported for "
1574  "partitioned slices.\n");
1575  return AVERROR(ENOSYS);
1576  }
1577 
1578  init_get_bits(&hx->gb, ptr, bit_length);
1579  hx->intra_gb_ptr =
1580  hx->inter_gb_ptr = NULL;
1581 
1582  if ((err = ff_h264_decode_slice_header(hx, h)) < 0) {
1583  /* make sure data_partitioning is cleared if it was set
1584  * before, so we don't try decoding a slice without a valid
1585  * slice header later */
1586  h->data_partitioning = 0;
1587  break;
1588  }
1589 
1590  hx->data_partitioning = 1;
1591  break;
1592  case NAL_DPB:
1593  init_get_bits(&hx->intra_gb, ptr, bit_length);
1594  hx->intra_gb_ptr = &hx->intra_gb;
1595  break;
1596  case NAL_DPC:
1597  init_get_bits(&hx->inter_gb, ptr, bit_length);
1598  hx->inter_gb_ptr = &hx->inter_gb;
1599 
1600  if (hx->redundant_pic_count == 0 &&
1601  hx->intra_gb_ptr &&
1602  hx->data_partitioning &&
1603  h->cur_pic_ptr && h->context_initialized &&
1604  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
1605  (avctx->skip_frame < AVDISCARD_BIDIR ||
1606  hx->slice_type_nos != AV_PICTURE_TYPE_B) &&
1607  (avctx->skip_frame < AVDISCARD_NONKEY ||
1608  hx->slice_type_nos == AV_PICTURE_TYPE_I) &&
1609  avctx->skip_frame < AVDISCARD_ALL)
1610  context_count++;
1611  break;
1612  case NAL_SEI:
1613  init_get_bits(&h->gb, ptr, bit_length);
1614  ret = ff_h264_decode_sei(h);
1615  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1616  goto end;
1617  break;
1618  case NAL_SPS:
1619  init_get_bits(&h->gb, ptr, bit_length);
1620  ret = ff_h264_decode_seq_parameter_set(h);
1621  if (ret < 0 && h->is_avc && (nalsize != consumed) && nalsize) {
1622  av_log(h->avctx, AV_LOG_DEBUG,
1623  "SPS decoding failure, trying again with the complete NAL\n");
1624  init_get_bits(&h->gb, buf + buf_index + 1 - consumed,
1625  8 * (nalsize - 1));
1626  ff_h264_decode_seq_parameter_set(h);
1627  }
1628 
1629  ret = h264_set_parameter_from_sps(h);
1630  if (ret < 0)
1631  goto end;
1632 
1633  break;
1634  case NAL_PPS:
1635  init_get_bits(&h->gb, ptr, bit_length);
1636  ret = ff_h264_decode_picture_parameter_set(h, bit_length);
1637  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1638  goto end;
1639  break;
1640  case NAL_AUD:
1641  case NAL_END_SEQUENCE:
1642  case NAL_END_STREAM:
1643  case NAL_FILLER_DATA:
1644  case NAL_SPS_EXT:
1645  case NAL_AUXILIARY_SLICE:
1646  break;
1647  case NAL_FF_IGNORE:
1648  break;
1649  default:
1650  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
1651  hx->nal_unit_type, bit_length);
1652  }
1653 
1654  if (context_count == h->max_contexts) {
1655  ret = ff_h264_execute_decode_slices(h, context_count);
1656  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1657  goto end;
1658  context_count = 0;
1659  }
1660 
1661  if (err < 0) {
1662  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
1663  h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
1664  } else if (err == 1) {
1665  /* Slice could not be decoded in parallel mode, copy down
1666  * NAL unit stuff to context 0 and restart. Note that
1667  * rbsp_buffer is not transferred, but since we no longer
1668  * run in parallel mode this should not be an issue. */
1669  h->nal_unit_type = hx->nal_unit_type;
1670  h->nal_ref_idc = hx->nal_ref_idc;
1671  hx = h;
1672  goto again;
1673  }
1674  }
1675  }
1676  if (context_count) {
1677  ret = ff_h264_execute_decode_slices(h, context_count);
1678  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1679  goto end;
1680  }
1681 
1682 end:
1683  /* clean up */
1684  if (h->cur_pic_ptr && !h->droppable) {
1685  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1686  h->picture_structure == PICT_BOTTOM_FIELD);
1687  }
1688 
1689  return (ret < 0) ? ret : buf_index;
1690 }
1691 
1695 static int get_consumed_bytes(int pos, int buf_size)
1696 {
1697  if (pos == 0)
1698  pos = 1; // avoid infinite loops (I doubt that is needed but...)
1699  if (pos + 10 > buf_size)
1700  pos = buf_size; // oops ;)
1701 
1702  return pos;
1703 }
1704 
1705 static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
1706 {
1707  int i;
1708  int ret = av_frame_ref(dst, src);
1709  if (ret < 0)
1710  return ret;
1711 
1712  if (!h->sps.crop)
1713  return 0;
1714 
1715  for (i = 0; i < 3; i++) {
1716  int hshift = (i > 0) ? h->chroma_x_shift : 0;
1717  int vshift = (i > 0) ? h->chroma_y_shift : 0;
1718  int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) +
1719  (h->sps.crop_top >> vshift) * dst->linesize[i];
1720  dst->data[i] += off;
1721  }
1722  return 0;
1723 }
1724 
1725 static int h264_decode_frame(AVCodecContext *avctx, void *data,
1726  int *got_frame, AVPacket *avpkt)
1727 {
1728  const uint8_t *buf = avpkt->data;
1729  int buf_size = avpkt->size;
1730  H264Context *h = avctx->priv_data;
1731  AVFrame *pict = data;
1732  int buf_index = 0;
1733  int ret;
1734 
1735  h->flags = avctx->flags;
1736  /* reset data partitioning here, to ensure GetBitContexts from previous
1737  * packets do not get used. */
1738  h->data_partitioning = 0;
1739 
1740  /* end of stream, output what is still in the buffers */
1741 out:
1742  if (buf_size == 0) {
1743  H264Picture *out;
1744  int i, out_idx;
1745 
1746  h->cur_pic_ptr = NULL;
1747 
1748  // FIXME factorize this with the output code below
1749  out = h->delayed_pic[0];
1750  out_idx = 0;
1751  for (i = 1;
1752  h->delayed_pic[i] &&
1753  !h->delayed_pic[i]->f.key_frame &&
1754  !h->delayed_pic[i]->mmco_reset;
1755  i++)
1756  if (h->delayed_pic[i]->poc < out->poc) {
1757  out = h->delayed_pic[i];
1758  out_idx = i;
1759  }
1760 
1761  for (i = out_idx; h->delayed_pic[i]; i++)
1762  h->delayed_pic[i] = h->delayed_pic[i + 1];
1763 
1764  if (out) {
1765  ret = output_frame(h, pict, &out->f);
1766  if (ret < 0)
1767  return ret;
1768  *got_frame = 1;
1769  }
1770 
1771  return buf_index;
1772  }
1773 
1774  buf_index = decode_nal_units(h, buf, buf_size, 0);
1775  if (buf_index < 0)
1776  return AVERROR_INVALIDDATA;
1777 
1778  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1779  buf_size = 0;
1780  goto out;
1781  }
1782 
1783  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1784  if (avctx->skip_frame >= AVDISCARD_NONREF)
1785  return 0;
1786  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1787  return AVERROR_INVALIDDATA;
1788  }
1789 
1790  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
1791  (h->mb_y >= h->mb_height && h->mb_height)) {
1792  if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
1793  decode_postinit(h, 1);
1794 
1795  ff_h264_field_end(h, 0);
1796 
1797  *got_frame = 0;
1798  if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) ||
1799  h->next_output_pic->recovered)) {
1800  if (!h->next_output_pic->recovered)
1801  h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT;
1802 
1803  ret = output_frame(h, pict, &h->next_output_pic->f);
1804  if (ret < 0)
1805  return ret;
1806  *got_frame = 1;
1807  }
1808  }
1809 
1810  assert(pict->buf[0] || !*got_frame);
1811 
1812  return get_consumed_bytes(buf_index, buf_size);
1813 }
1814 
1815 av_cold void ff_h264_free_context(H264Context *h)
1816 {
1817  int i;
1818 
1819  ff_h264_free_tables(h, 1); // FIXME cleanup init stuff perhaps
1820 
1821  for (i = 0; i < MAX_SPS_COUNT; i++)
1822  av_freep(h->sps_buffers + i);
1823 
1824  for (i = 0; i < MAX_PPS_COUNT; i++)
1825  av_freep(h->pps_buffers + i);
1826 }
1827 
1828 static av_cold int h264_decode_end(AVCodecContext *avctx)
1829 {
1830  H264Context *h = avctx->priv_data;
1831 
1832  ff_h264_free_context(h);
1833 
1834  ff_h264_unref_picture(h, &h->cur_pic);
1835 
1836  return 0;
1837 }
1838 
1839 static const AVProfile profiles[] = {
1840  { FF_PROFILE_H264_BASELINE, "Baseline" },
1841  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
1842  { FF_PROFILE_H264_MAIN, "Main" },
1843  { FF_PROFILE_H264_EXTENDED, "Extended" },
1844  { FF_PROFILE_H264_HIGH, "High" },
1845  { FF_PROFILE_H264_HIGH_10, "High 10" },
1846  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
1847  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
1848  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
1849  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
1850  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
1851  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
1852  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
1853  { FF_PROFILE_UNKNOWN },
1854 };
1855 
1856 AVCodec ff_h264_decoder = {
1857  .name = "h264",
1858  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1859  .type = AVMEDIA_TYPE_VIDEO,
1860  .id = AV_CODEC_ID_H264,
1861  .priv_data_size = sizeof(H264Context),
1862  .init = ff_h264_decode_init,
1863  .close = h264_decode_end,
1864  .decode = h264_decode_frame,
1865  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
1866  CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS |
1867  CODEC_CAP_FRAME_THREADS,
1868  .flush = flush_dpb,
1869  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1870  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1871  .profiles = NULL_IF_CONFIG_SMALL(profiles),
1872 };
int chroma_format_idc
Definition: h264.h:160
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:2649
GetBitContext inter_gb
Definition: h264.h:455
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:893
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:46
MECmpContext * mecc
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:299
discard all frames except keyframes
Definition: avcodec.h:567
uint8_t * edge_emu_buffer
Definition: h264.h:700
void ff_h264_flush_change(H264Context *h)
Definition: h264.c:1050
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2950
int workaround_bugs
Definition: h264.h:336
unsigned int top_samples_available
Definition: h264.h:362
#define FF_PROFILE_H264_CAVLC_444
Definition: avcodec.h:2659
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:1412
#define DC_128_PRED8x8
Definition: h264pred.h:76
void ff_h264_free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:343
GetBitContext gb
Definition: h264.h:311
Views are packed per line, as if interlaced.
Definition: stereo3d.h:97
#define AV_NUM_DATA_POINTERS
Definition: frame.h:136
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:1557
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
Call decode_slice() for each context.
Definition: h264_slice.c:2340
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:147
int low_delay
Definition: h264.h:332
int mb_num
Definition: h264.h:504
GetBitContext * intra_gb_ptr
Definition: h264.h:456
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1599
mpeg2/4, h264 default
Definition: pixfmt.h:378
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
Definition: h264.h:111
int delta_poc[2]
Definition: h264.h:543
Views are alternated temporally.
Definition: stereo3d.h:66
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:179
int quincunx_subsampling
Definition: h264.h:635
3: top field, bottom field, in that order
Definition: h264.h:145
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:216
#define H264_MAX_PICTURE_COUNT
Definition: h264.h:46
int first_field
Definition: h264.h:420
misc image utilities
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:87
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:393
Definition: h264.h:115
H264ChromaContext h264chroma
Definition: h264.h:308
uint16_t * cbp_table
Definition: h264.h:471
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:603
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:691
7: frame doubling
Definition: h264.h:149
#define MAX_PPS_COUNT
Definition: h264.h:50
Sequence parameter set.
Definition: h264.h:156
int mb_y
Definition: h264.h:498
int bitstream_restriction_flag
Definition: h264.h:196
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:2645
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:225
#define FMO
Definition: h264.h:60
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:315
H264Picture * DPB
Definition: h264.h:314
static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264.c:1356
int size
Definition: avcodec.h:968
AVBufferPool * mb_type_pool
Definition: h264.h:704
int outputed_poc
Definition: h264.h:569
int chroma_x_shift
Definition: h264.h:326
int flags
Definition: h264.h:335
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1248
int mb_height
Definition: h264.h:502
int16_t * dc_val_base
Definition: h264.h:701
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:566
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:525
AVBufferPool * ref_index_pool
Definition: h264.h:706
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:1188
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:378
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:325
H264Context.
Definition: h264.h:303
discard all
Definition: avcodec.h:568
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:545
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2508
4: bottom field, top field, in that order
Definition: h264.h:146
struct AVFrame f
Definition: h264.h:264
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:682
AVCodec.
Definition: avcodec.h:2790
int frame_start_found
Definition: parser.h:34
int picture_structure
Definition: h264.h:419
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:412
int profile_idc
Definition: h264.h:158
#define CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:613
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:884
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
Definition: avcodec.h:2657
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1169
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
uint8_t * chroma_pred_mode_table
Definition: h264.h:476
enum AVDiscard skip_frame
Definition: avcodec.h:2721
#define AV_RN32A(p)
Definition: intreadwrite.h:446
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2426
unsigned int crop_top
frame_cropping_rect_top_offset
Definition: h264.h:181
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:680
uint8_t scaling_matrix4[6][16]
Definition: h264.h:235
void h264_init_dequant_tables(H264Context *h)
Definition: h264_slice.c:382
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:275
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:2647
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:406
#define CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:669
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:37
uint8_t
#define av_cold
Definition: attributes.h:66
int prev_frame_num_offset
for POC type 2
Definition: h264.h:548
int use_weight
Definition: h264.h:425
unsigned int crop_left
frame_cropping_rect_left_offset
Definition: h264.h:179
int offset_for_non_ref_pic
Definition: h264.h:166
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:37
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
int data_partitioning
Definition: h264.h:330
int luma_weight[48][2][2]
Definition: h264.h:430
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
Definition: h264.h:210
int poc
frame POC
Definition: h264.h:283
AVCodec ff_h264_decoder
Definition: h264.c:1856
Multithreading support functions.
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int mb_xy
Definition: h264.h:505
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:188
#define emms_c()
Definition: internal.h:47
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:445
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1158
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:684
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:689
const char data[16]
Definition: mxf.c:70
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
int mb_x
Definition: h264.h:498
H264Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:563
uint8_t * data
Definition: avcodec.h:967
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:328
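decode_rbsp_trailing() locates the rbsp_stop_one_bit so the decoder knows how many bits of the final byte carry payload. A hedged sketch of the same idea, operating on just the last RBSP byte (the libav routine works on the buffer pointer and returns a bit count):

#include <stdint.h>

/* Sketch: the trailing bits of an RBSP are a single '1' followed by
 * zeros up to the byte boundary.  Scan from the least significant bit
 * upward for that stop bit and report how many payload bits precede it. */
static int rbsp_payload_bits_in_last_byte(uint8_t last_byte)
{
    for (int bits = 8; bits > 0; bits--)
        if (last_byte & (1 << (8 - bits)))  /* found rbsp_stop_one_bit */
            return bits - 1;                /* payload bits before it  */
    return 0;                               /* byte contained no stop bit */
}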
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:2650
int chroma_y_shift
Definition: h264.h:326
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:320
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:54
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
high precision timer, useful to profile code
int recovered
picture at IDR or recovery point + recovery count
Definition: h264.h:297
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1767
int luma_log2_weight_denom
Definition: h264.h:427
int sei_vflip
Definition: h264.h:642
int chroma_weight[48][2][2][2]
Definition: h264.h:431
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:567
#define r
Definition: input.c:51
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:544
H264PredContext hpc
Definition: h264.h:360
int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264_slice.c:1174
static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
Definition: h264.c:1705
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:175
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1333
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code.
Definition: golomb.h:53
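An unsigned Exp-Golomb code is leading_zero_bits zeros, a marker '1', and leading_zero_bits suffix bits; the decoded value is (1 << leading_zero_bits) - 1 + suffix. A self-contained sketch; read_bit() is a hypothetical callback, not the GetBitContext API, and the real get_ue_golomb() uses lookup tables for short codes:

/* Sketch of ue(v) decoding from an abstract bit source. */
static unsigned decode_ue_golomb_sketch(int (*read_bit)(void *ctx), void *ctx)
{
    int leading_zeros = 0;
    unsigned suffix = 0;

    while (read_bit(ctx) == 0)              /* count zeros before the '1' */
        leading_zeros++;
    for (int i = 0; i < leading_zeros; i++) /* read the suffix bits */
        suffix = (suffix << 1) | (unsigned)read_bit(ctx);

    return (1u << leading_zeros) - 1 + suffix;
}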
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:1695
int poc_type
pic_order_cnt_type
Definition: h264.h:163
int context_initialized
Definition: h264.h:334
static const uint16_t mask[17]
Definition: lzw.c:38
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:713
ParseContext parse_context
Definition: h264.h:310
int nal_unit_type
Definition: h264.h:518
int use_weight_chroma
Definition: h264.h:426
int num_reorder_frames
Definition: h264.h:197
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:64
discard all bidirectional frames
Definition: avcodec.h:566
#define AVERROR(e)
Definition: error.h:43
GetBitContext * inter_gb_ptr
Definition: h264.h:457
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:150
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2553
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
Definition: h264.c:1103
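For poc_type 0, ff_init_poc() has to detect wrap-around of pic_order_cnt_lsb; the MSB derivation from H.264 subclause 8.2.1.1 is sketched below, with max_poc_lsb being 1 << log2_max_poc_lsb:

/* Sketch of the POC type 0 MSB derivation (spec 8.2.1.1). */
static int derive_poc_msb(int poc_lsb, int prev_poc_lsb,
                          int prev_poc_msb, int max_poc_lsb)
{
    if (poc_lsb < prev_poc_lsb &&
        prev_poc_lsb - poc_lsb >= max_poc_lsb / 2)
        return prev_poc_msb + max_poc_lsb;   /* LSB wrapped forward  */
    if (poc_lsb > prev_poc_lsb &&
        poc_lsb - prev_poc_lsb > max_poc_lsb / 2)
        return prev_poc_msb - max_poc_lsb;   /* LSB wrapped backward */
    return prev_poc_msb;
}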
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:1071
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:144
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
PPS pps
current pps
Definition: h264.h:402
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:478
#define FF_PROFILE_H264_HIGH_422
Definition: avcodec.h:2654
int prev_interlaced_frame
Complement sei_pic_struct. SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:627
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:2651
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1138
int direct_spatial_mv_pred
Definition: h264.h:434
ThreadFrame tf
Definition: h264.h:265
0: frame
Definition: h264.h:142
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:168
const char * name
Name of the codec implementation.
Definition: avcodec.h:2797
H264QpelContext h264qpel
Definition: h264.h:309
ERContext er
Definition: h264.h:312
void ff_init_cabac_states(void)
Definition: cabac.c:124
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:447
#define FFMAX(a, b)
Definition: common.h:55
#define CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:656
uint8_t * mbintra_table
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:167
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:89
static const uint8_t scan8[16 *3+3]
Definition: h264.h:868
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:190
int crop
frame_cropping_flag
Definition: h264.h:176
uint8_t * error_status_table
uint8_t * direct_table
Definition: h264.h:480
int ff_pred_weight_table(H264Context *h)
Definition: h264.c:975
uint8_t scaling_matrix8[6][64]
Definition: h264.h:236
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:531
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:526
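In the AVC (avcC) variant each NAL unit is preceded by a big-endian length field of nal_length_size bytes instead of an Annex B start code. A hedged sketch of reading one such prefix; get_avc_nalsize() in this file does the equivalent with bounds and error checking:

#include <stdint.h>

/* Sketch: read a 1-, 2- or 4-byte big-endian NAL length prefix. */
static uint32_t read_nal_length_sketch(const uint8_t *buf, int nal_length_size)
{
    uint32_t len = 0;
    for (int i = 0; i < nal_length_size; i++)
        len = (len << 8) | buf[i];
    return len;    /* the NAL payload follows immediately after */
}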
useful rectangle filling function
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
unsigned int left_samples_available
Definition: h264.h:364
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:54
int sei_anticlockwise_rotation
Definition: h264.h:641
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non-NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1282
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:196
int frame_num_offset
for POC type 2
Definition: h264.h:547
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:434
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2400
int x264_build
Definition: h264.h:496
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2545
uint32_t * mb2br_xy
Definition: h264.h:395
uint8_t * er_temp_buffer
int needs_realloc
picture needs to be reallocated (eg due to a frame size change)
Definition: h264.h:295
int overread
the number of bytes which were irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:57
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:410
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:57
int last_index
Definition: parser.h:31
#define H264_MAX_THREADS
Definition: h264.h:47
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:168
int reference
Definition: h264.h:296
int redundant_pic_count
Definition: h264.h:561
int sei_frame_packing_present
frame_packing_arrangement SEI message
Definition: h264.h:632
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2617
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:426
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
SPS sps
current sps
Definition: h264.h:401
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:533
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:396
int sei_hflip
Definition: h264.h:642
#define MAX_SPS_COUNT
Definition: h264.h:49
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:534
Context Adaptive Binary Arithmetic Coder inline functions.
H264Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:448
int mmco_reset
Definition: h264.h:577
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:358
uint8_t * bipred_scratchpad
Definition: h264.h:699
#define AV_EF_EXPLODE
Definition: avcodec.h:2411
int poc_lsb
Definition: h264.h:540
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:1725
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1178
int ff_set_ref_count(H264Context *h)
Definition: h264.c:1248
Definition: h264.h:116
#define HAVE_THREADS
Definition: config.h:283
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: mpegutils.h:41
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:402
#define CONFIG_ERROR_RESILIENCE
Definition: config.h:368
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure rotation by the specified angle (in degrees)...
Definition: display.c:52
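The display matrix side data is a 3x3 transform; a pure rotation only fills the upper-left 2x2 block with cos/sin terms. A rough sketch of the underlying math; the real av_display_rotation_set() stores the entries as int32_t fixed-point values and fixes the sign convention, so this is illustration only:

#include <math.h>
#include <string.h>

/* Sketch: build a counterclockwise rotation matrix in doubles. */
static void rotation_matrix_sketch(double angle_deg, double m[9])
{
    const double pi  = 3.14159265358979323846;
    const double rad = angle_deg * pi / 180.0;
    const double r[9] = { cos(rad), -sin(rad), 0.0,
                          sin(rad),  cos(rad), 0.0,
                          0.0,       0.0,      1.0 };
    memcpy(m, r, sizeof(r));
}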
#define PART_NOT_AVAILABLE
Definition: h264.h:381
unsigned int list_count
Definition: h264.h:446
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2534
GetBitContext intra_gb
Definition: h264.h:454
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:535
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:532
struct H264Context * thread_context[H264_MAX_THREADS]
Definition: h264.h:588
static const int8_t mv[256][2]
Definition: 4xm.c:75
int chroma_log2_weight_denom
Definition: h264.h:428
int bit_depth_luma
luma bit depth from sps to detect changes
Definition: h264.h:529
short offset_for_ref_frame[256]
Definition: h264.h:195
int chroma_format_idc
chroma format from sps to detect changes
Definition: h264.h:530
VideoDSPContext vdsp
Definition: h264.h:306
NULL
Definition: eval.c:55
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int mb_stride
Definition: h264.h:503
#define AV_LOG_INFO
Standard information.
Definition: log.h:134
AVCodecContext * avctx
Definition: h264.h:304
Libavcodec external API header.
H264 / AVC / MPEG4 part10 codec data table
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
static int get_bit_length(H264Context *h, const uint8_t *buf, const uint8_t *ptr, int dst_length, int i, int next_avc)
Definition: h264.c:1337
#define FF_PROFILE_H264_HIGH_422_INTRA
Definition: avcodec.h:2655
1: top field
Definition: h264.h:143
enum AVCodecID codec_id
Definition: avcodec.h:1061
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:476
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:549
int ff_h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:1208
Definition: h264.h:114
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
int next_outputed_poc
Definition: h264.h:570
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:219
int poc_msb
Definition: h264.h:541
int field_poc[2]
top/bottom POC
Definition: h264.h:282
int debug
debug
Definition: avcodec.h:2356
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int max_contexts
Max number of threads / contexts.
Definition: h264.h:601
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:676
main external API structure.
Definition: avcodec.h:1044
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:67
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:490
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:128
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:702
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:404
2: bottom field
Definition: h264.h:144
uint8_t * data
Definition: frame.h:104
int frame_packing_arrangement_type
Definition: h264.h:633
static int find_start_code(const uint8_t *buf, int buf_size, int buf_index, int next_avc)
Definition: h264.c:1302
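Annex B streams delimit NAL units with the byte pattern 00 00 01 (possibly preceded by an extra zero byte). A hedged sketch of locating the next start code; the static function in this file additionally honours next_avc boundaries and keeps parser state:

#include <stdint.h>

/* Sketch: return the offset just past the next 00 00 01 sequence,
 * or buf_size if no start code is present. */
static int find_annexb_startcode_sketch(const uint8_t *buf, int buf_size)
{
    for (int i = 0; i + 2 < buf_size; i++)
        if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1)
            return i + 3;            /* NAL payload begins here */
    return buf_size;
}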
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
int extradata_size
Definition: avcodec.h:1159
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:212
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:271
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:619
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:472
#define FF_PROFILE_H264_HIGH_10_INTRA
Definition: avcodec.h:2653
int slice_flags
slice flags
Definition: avcodec.h:1555
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:444
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:1828
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:226
int8_t * ref_index[2]
Definition: h264.h:280
Definition: h264.h:112
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:375
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:318
int mmco_reset
MMCO_RESET sets this to 1.
Definition: h264.h:285
H264Picture * cur_pic_ptr
Definition: h264.h:315
#define FF_PROFILE_H264_HIGH_444
Definition: avcodec.h:2656
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:2337
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:79
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:164
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:148
AVCodecContext * avctx
void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
Definition: h264.c:80
Views are on top of each other.
Definition: stereo3d.h:55
#define FF_BUG_AUTODETECT
autodetection
Definition: avcodec.h:2302
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:283
int pic_struct_present_flag
Definition: h264.h:203
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:32
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:1815
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
Definition: h264.h:117
int height
Definition: gxfenc.c:72
Views are next to each other.
Definition: stereo3d.h:45
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:1040
discard all non-reference frames
Definition: avcodec.h:565
AVBufferPool * qscale_table_pool
Definition: h264.h:703
H264Picture * next_output_pic
Definition: h264.h:568
int slice_context_count
Definition: h264.h:603
AVBufferPool * motion_val_pool
Definition: h264.h:705
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:388
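av_fast_malloc() implements the grow-only scratch-buffer pattern used for rbsp_buffer and similar temporaries: keep the current allocation when it is already large enough, otherwise replace it. A hedged sketch of the pattern, not the libavutil implementation (which also over-allocates to amortise growth and takes a pointer-to-pointer):

#include <stdlib.h>

/* Sketch: *size tracks the capacity of buf; grow only when needed. */
static void *fast_malloc_sketch(void *buf, size_t *size, size_t min_size)
{
    if (buf && *size >= min_size)
        return buf;                  /* existing buffer is big enough */
    free(buf);
    buf = malloc(min_size);
    *size = buf ? min_size : 0;
    return buf;
}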
uint8_t * rbsp_buffer[2]
Definition: h264.h:519
#define tprintf(p,...)
Definition: get_bits.h:626
MECmpContext mecc
Definition: h264.h:305
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:759
common internal api header.
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:755
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264.h:687
int ff_h264_field_end(H264Context *h, int in_setup)
Definition: h264_picture.c:147
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:117
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1771
uint16_t * slice_table_base
Definition: h264.h:537
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:162
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector prediction.
Bi-dir predicted.
Definition: avutil.h:255
AVProfile.
Definition: avcodec.h:2778
int index
Definition: parser.h:30
int ff_h264_context_init(H264Context *h)
Init context; allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:469
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2301
int cur_chroma_format_idc
Definition: h264.h:698
int den
denominator
Definition: rational.h:45
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:649
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:499
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:209
void * priv_data
Definition: avcodec.h:1086
#define PICT_FRAME
Definition: mpegutils.h:35
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:546
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2370
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:325
#define FRAME_MBAFF(h)
Definition: h264.h:71
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:52
uint8_t non_zero_count_cache[15 *8]
Non-zero coefficient count cache.
Definition: h264.h:371
int ff_h264_decode_extradata(H264Context *h)
Definition: h264.c:544
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:365
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1094
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2321
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
H264Picture cur_pic
Definition: h264.h:316
int sei_display_orientation_present
display orientation SEI message
Definition: h264.h:640
int content_interpretation_type
Definition: h264.h:634
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:191
Views are packed per column.
Definition: stereo3d.h:107
int mb_width
Definition: h264.h:502
enum AVPictureType pict_type
Definition: h264.h:611
int current_slice
current slice number, used to initalize slice_num of each thread/context
Definition: h264.h:593
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1145
uint32_t * mb2b_xy
Definition: h264.h:394
#define FF_PROFILE_H264_HIGH_444_INTRA
Definition: avcodec.h:2658
int delta_poc_bottom
Definition: h264.h:542
#define SLICE_FLAG_CODED_ORDER
draw_horiz_band() is called in coded order instead of display
Definition: avcodec.h:1556
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:205
H264DSPContext h264dsp
Definition: h264.h:307
static int get_avc_nalsize(H264Context *h, const uint8_t *buf, int buf_size, int *buf_index)
Definition: h264.c:1319
Definition: h264.h:110
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:692
int8_t * intra4x4_pred_mode
Definition: h264.h:359
#define FF_PROFILE_H264_CONSTRAINED
Definition: avcodec.h:2644
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:2648
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2963
8: frame tripling
Definition: h264.h:150
#define AV_RN64A(p)
Definition: intreadwrite.h:450
uint8_t(* non_zero_count)[48]
Definition: h264.h:373
#define FF_PROFILE_H264_HIGH_10
Definition: avcodec.h:2652
Exp-Golomb VLC helpers.
uint8_t * mbskip_table
This structure stores compressed data.
Definition: avcodec.h:944
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:668
int droppable
Definition: h264.h:329
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2335
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:517
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:126
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264_mb.c:804
int b_stride
Definition: h264.h:396
unsigned int rbsp_buffer_size[2]
Definition: h264.h:520
Context Adaptive Binary Arithmetic Coder.
int8_t ref_cache[2][5 *8]
Definition: h264.h:379
Definition: vf_drawbox.c:37
static const AVProfile profiles[]
Definition: h264.c:1839