h264.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #include "libavutil/imgutils.h"
29 #include "internal.h"
30 #include "cabac.h"
31 #include "cabac_functions.h"
32 #include "dsputil.h"
33 #include "avcodec.h"
34 #include "mpegvideo.h"
35 #include "h264.h"
36 #include "h264data.h"
37 #include "h264_mvpred.h"
38 #include "golomb.h"
39 #include "mathops.h"
40 #include "rectangle.h"
41 #include "thread.h"
42 #include "vdpau_internal.h"
43 #include "libavutil/avassert.h"
44 
45 // #undef NDEBUG
46 #include <assert.h>
47 
/* Macroblock data sizes in bytes, presumably indexed by chroma format
 * (256 = 16x16 luma only; 384/512/768 add increasing chroma payload) —
 * TODO(review): confirm index semantics against the callers. */
48 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
49 
/* rem6[q] == q % 6 for every legal QP value (0..QP_MAX_NUM); used to pick
 * the base dequant coefficient row for a given QP. */
50 static const uint8_t rem6[QP_MAX_NUM + 1] = {
51  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
52  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
53  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
54 };
55 
/* div6[q] == q / 6 for every legal QP value (0..QP_MAX_NUM); used as the
 * left-shift amount when building the dequant tables. */
56 static const uint8_t div6[QP_MAX_NUM + 1] = {
57  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
58  3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
59  7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
60 };
61 
63 #if CONFIG_H264_DXVA2_HWACCEL
65 #endif
66 #if CONFIG_H264_VAAPI_HWACCEL
68 #endif
69 #if CONFIG_H264_VDA_HWACCEL
71 #endif
74 };
75 
81 {
82  MpegEncContext *const s = &h->s;
83  static const int8_t top[12] = {
84  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
85  };
86  static const int8_t left[12] = {
87  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
88  };
89  int i;
90 
91  if (!(h->top_samples_available & 0x8000)) {
92  for (i = 0; i < 4; i++) {
93  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
94  if (status < 0) {
96  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
97  status, s->mb_x, s->mb_y);
98  return -1;
99  } else if (status) {
100  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
101  }
102  }
103  }
104 
105  if ((h->left_samples_available & 0x8888) != 0x8888) {
106  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
107  for (i = 0; i < 4; i++)
108  if (!(h->left_samples_available & mask[i])) {
109  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
110  if (status < 0) {
112  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
113  status, s->mb_x, s->mb_y);
114  return -1;
115  } else if (status) {
116  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
117  }
118  }
119  }
120 
121  return 0;
122 } // FIXME cleanup like ff_h264_check_intra_pred_mode
123 
/**
 * Check whether the top/left neighbouring blocks are available and, if not,
 * substitute an intra prediction mode that does not need them.
 *
 * NOTE(review): this dump dropped original lines 135, 144 and 160 — the
 * av_log() call openings that the dangling string-literal lines below
 * belong to.
 *
 * @param mode      requested prediction mode; values > 6 are rejected
 * @param is_chroma nonzero when validating a chroma prediction mode
 * @return the (possibly substituted) mode, or -1 when no valid
 *         substitute exists for the unavailable neighbours
 */
128 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
129 {
130  MpegEncContext *const s = &h->s;
131  static const int8_t top[7] = { LEFT_DC_PRED8x8, 1, -1, -1 };
132  static const int8_t left[7] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
133 
134  if (mode > 6U) {
136  "out of range intra chroma pred mode at %d %d\n",
137  s->mb_x, s->mb_y);
138  return -1;
139  }
140 
141  if (!(h->top_samples_available & 0x8000)) {
142  mode = top[mode];
143  if (mode < 0) {
145  "top block unavailable for requested intra mode at %d %d\n",
146  s->mb_x, s->mb_y);
147  return -1;
148  }
149  }
150 
151  if ((h->left_samples_available & 0x8080) != 0x8080) {
152  mode = left[mode];
153  if (is_chroma && (h->left_samples_available & 0x8080)) {
154  // mad cow disease mode, aka MBAFF + constrained_intra_pred
155  mode = ALZHEIMER_DC_L0T_PRED8x8 +
156  (!(h->left_samples_available & 0x8000)) +
157  2 * (mode == DC_128_PRED8x8);
158  }
159  if (mode < 0) {
161  "left block unavailable for requested intra mode at %d %d\n",
162  s->mb_x, s->mb_y);
163  return -1;
164  }
165  }
166 
167  return mode;
168 }
169 
171  int *dst_length, int *consumed, int length)
172 {
173  int i, si, di;
174  uint8_t *dst;
175  int bufidx;
176 
177  // src[0]&0x80; // forbidden bit
178  h->nal_ref_idc = src[0] >> 5;
179  h->nal_unit_type = src[0] & 0x1F;
180 
181  src++;
182  length--;
183 
184 #define STARTCODE_TEST \
185  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
186  if (src[i + 2] != 3) { \
187  /* startcode, so we must be past the end */ \
188  length = i; \
189  } \
190  break; \
191  }
192 #if HAVE_FAST_UNALIGNED
193 #define FIND_FIRST_ZERO \
194  if (i > 0 && !src[i]) \
195  i--; \
196  while (src[i]) \
197  i++
198 #if HAVE_FAST_64BIT
199  for (i = 0; i + 1 < length; i += 9) {
200  if (!((~AV_RN64A(src + i) &
201  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
202  0x8000800080008080ULL))
203  continue;
204  FIND_FIRST_ZERO;
206  i -= 7;
207  }
208 #else
209  for (i = 0; i + 1 < length; i += 5) {
210  if (!((~AV_RN32A(src + i) &
211  (AV_RN32A(src + i) - 0x01000101U)) &
212  0x80008080U))
213  continue;
214  FIND_FIRST_ZERO;
216  i -= 3;
217  }
218 #endif
219 #else
220  for (i = 0; i + 1 < length; i += 2) {
221  if (src[i])
222  continue;
223  if (i > 0 && src[i - 1] == 0)
224  i--;
226  }
227 #endif
228 
229  if (i >= length - 1) { // no escaped 0
230  *dst_length = length;
231  *consumed = length + 1; // +1 for the header
232  return src;
233  }
234 
235  // use second escape buffer for inter data
236  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
237  av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
239  dst = h->rbsp_buffer[bufidx];
240 
241  if (dst == NULL)
242  return NULL;
243 
244  memcpy(dst, src, i);
245  si = di = i;
246  while (si + 2 < length) {
247  // remove escapes (very rare 1:2^22)
248  if (src[si + 2] > 3) {
249  dst[di++] = src[si++];
250  dst[di++] = src[si++];
251  } else if (src[si] == 0 && src[si + 1] == 0) {
252  if (src[si + 2] == 3) { // escape
253  dst[di++] = 0;
254  dst[di++] = 0;
255  si += 3;
256  continue;
257  } else // next start code
258  goto nsc;
259  }
260 
261  dst[di++] = src[si++];
262  }
263  while (si < length)
264  dst[di++] = src[si++];
265 nsc:
266 
267  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
268 
269  *dst_length = di;
270  *consumed = si + 1; // +1 for the header
271  /* FIXME store exact number of bits in the getbitcontext
272  * (it is needed for decoding) */
273  return dst;
274 }
275 
280 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
281 {
282  int v = *src;
283  int r;
284 
285  tprintf(h->s.avctx, "rbsp trailing %X\n", v);
286 
287  for (r = 1; r < 9; r++) {
288  if (v & 1)
289  return r;
290  v >>= 1;
291  }
292  return 0;
293 }
294 
295 static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
296  int height, int y_offset, int list)
297 {
298  int raw_my = h->mv_cache[list][scan8[n]][1];
299  int filter_height_up = (raw_my & 3) ? 2 : 0;
300  int filter_height_down = (raw_my & 3) ? 3 : 0;
301  int full_my = (raw_my >> 2) + y_offset;
302  int top = full_my - filter_height_up;
303  int bottom = full_my + filter_height_down + height;
304 
305  return FFMAX(abs(top), bottom);
306 }
307 
308 static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
309  int height, int y_offset, int list0,
310  int list1, int *nrefs)
311 {
312  MpegEncContext *const s = &h->s;
313  int my;
314 
315  y_offset += 16 * (s->mb_y >> MB_FIELD);
316 
317  if (list0) {
318  int ref_n = h->ref_cache[0][scan8[n]];
319  Picture *ref = &h->ref_list[0][ref_n];
320 
321  // Error resilience puts the current picture in the ref list.
322  // Don't try to wait on these as it will cause a deadlock.
323  // Fields can wait on each other, though.
324  if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
325  (ref->f.reference & 3) != s->picture_structure) {
326  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
327  if (refs[0][ref_n] < 0)
328  nrefs[0] += 1;
329  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
330  }
331  }
332 
333  if (list1) {
334  int ref_n = h->ref_cache[1][scan8[n]];
335  Picture *ref = &h->ref_list[1][ref_n];
336 
337  if (ref->f.thread_opaque != s->current_picture.f.thread_opaque ||
338  (ref->f.reference & 3) != s->picture_structure) {
339  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
340  if (refs[1][ref_n] < 0)
341  nrefs[1] += 1;
342  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
343  }
344  }
345 }
346 
353 {
354  MpegEncContext *const s = &h->s;
355  const int mb_xy = h->mb_xy;
356  const int mb_type = s->current_picture.f.mb_type[mb_xy];
357  int refs[2][48];
358  int nrefs[2] = { 0 };
359  int ref, list;
360 
361  memset(refs, -1, sizeof(refs));
362 
363  if (IS_16X16(mb_type)) {
364  get_lowest_part_y(h, refs, 0, 16, 0,
365  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
366  } else if (IS_16X8(mb_type)) {
367  get_lowest_part_y(h, refs, 0, 8, 0,
368  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
369  get_lowest_part_y(h, refs, 8, 8, 8,
370  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
371  } else if (IS_8X16(mb_type)) {
372  get_lowest_part_y(h, refs, 0, 16, 0,
373  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
374  get_lowest_part_y(h, refs, 4, 16, 0,
375  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
376  } else {
377  int i;
378 
379  assert(IS_8X8(mb_type));
380 
381  for (i = 0; i < 4; i++) {
382  const int sub_mb_type = h->sub_mb_type[i];
383  const int n = 4 * i;
384  int y_offset = (i & 2) << 2;
385 
386  if (IS_SUB_8X8(sub_mb_type)) {
387  get_lowest_part_y(h, refs, n, 8, y_offset,
388  IS_DIR(sub_mb_type, 0, 0),
389  IS_DIR(sub_mb_type, 0, 1),
390  nrefs);
391  } else if (IS_SUB_8X4(sub_mb_type)) {
392  get_lowest_part_y(h, refs, n, 4, y_offset,
393  IS_DIR(sub_mb_type, 0, 0),
394  IS_DIR(sub_mb_type, 0, 1),
395  nrefs);
396  get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
397  IS_DIR(sub_mb_type, 0, 0),
398  IS_DIR(sub_mb_type, 0, 1),
399  nrefs);
400  } else if (IS_SUB_4X8(sub_mb_type)) {
401  get_lowest_part_y(h, refs, n, 8, y_offset,
402  IS_DIR(sub_mb_type, 0, 0),
403  IS_DIR(sub_mb_type, 0, 1),
404  nrefs);
405  get_lowest_part_y(h, refs, n + 1, 8, y_offset,
406  IS_DIR(sub_mb_type, 0, 0),
407  IS_DIR(sub_mb_type, 0, 1),
408  nrefs);
409  } else {
410  int j;
411  assert(IS_SUB_4X4(sub_mb_type));
412  for (j = 0; j < 4; j++) {
413  int sub_y_offset = y_offset + 2 * (j & 2);
414  get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
415  IS_DIR(sub_mb_type, 0, 0),
416  IS_DIR(sub_mb_type, 0, 1),
417  nrefs);
418  }
419  }
420  }
421  }
422 
423  for (list = h->list_count - 1; list >= 0; list--)
424  for (ref = 0; ref < 48 && nrefs[list]; ref++) {
425  int row = refs[list][ref];
426  if (row >= 0) {
427  Picture *ref_pic = &h->ref_list[list][ref];
428  int ref_field = ref_pic->f.reference - 1;
429  int ref_field_picture = ref_pic->field_picture;
430  int pic_height = 16 * s->mb_height >> ref_field_picture;
431 
432  row <<= MB_MBAFF;
433  nrefs[list]--;
434 
435  if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
436  ff_thread_await_progress(&ref_pic->f,
437  FFMIN((row >> 1) - !(row & 1),
438  pic_height - 1),
439  1);
440  ff_thread_await_progress(&ref_pic->f,
441  FFMIN((row >> 1), pic_height - 1),
442  0);
443  } else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
444  ff_thread_await_progress(&ref_pic->f,
445  FFMIN(row * 2 + ref_field,
446  pic_height - 1),
447  0);
448  } else if (FIELD_PICTURE) {
449  ff_thread_await_progress(&ref_pic->f,
450  FFMIN(row, pic_height - 1),
451  ref_field);
452  } else {
453  ff_thread_await_progress(&ref_pic->f,
454  FFMIN(row, pic_height - 1),
455  0);
456  }
457  }
458  }
459 }
460 
462  int n, int square, int height,
463  int delta, int list,
464  uint8_t *dest_y, uint8_t *dest_cb,
465  uint8_t *dest_cr,
466  int src_x_offset, int src_y_offset,
467  qpel_mc_func *qpix_op,
468  h264_chroma_mc_func chroma_op,
469  int pixel_shift, int chroma_idc)
470 {
471  MpegEncContext *const s = &h->s;
472  const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
473  int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
474  const int luma_xy = (mx & 3) + ((my & 3) << 2);
475  int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
476  uint8_t *src_y = pic->f.data[0] + offset;
477  uint8_t *src_cb, *src_cr;
478  int extra_width = h->emu_edge_width;
479  int extra_height = h->emu_edge_height;
480  int emu = 0;
481  const int full_mx = mx >> 2;
482  const int full_my = my >> 2;
483  const int pic_width = 16 * s->mb_width;
484  const int pic_height = 16 * s->mb_height >> MB_FIELD;
485  int ysh;
486 
487  if (mx & 7)
488  extra_width -= 3;
489  if (my & 7)
490  extra_height -= 3;
491 
492  if (full_mx < 0 - extra_width ||
493  full_my < 0 - extra_height ||
494  full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
495  full_my + 16 /*FIXME*/ > pic_height + extra_height) {
497  src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
498  h->mb_linesize,
499  16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
500  full_my - 2, pic_width, pic_height);
501  src_y = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
502  emu = 1;
503  }
504 
505  qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
506  if (!square)
507  qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
508 
509  if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
510  return;
511 
512  if (chroma_idc == 3 /* yuv444 */) {
513  src_cb = pic->f.data[1] + offset;
514  if (emu) {
516  src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
517  h->mb_linesize,
518  16 + 5, 16 + 5 /*FIXME*/,
519  full_mx - 2, full_my - 2,
520  pic_width, pic_height);
521  src_cb = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
522  }
523  qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
524  if (!square)
525  qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
526 
527  src_cr = pic->f.data[2] + offset;
528  if (emu) {
530  src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
531  h->mb_linesize,
532  16 + 5, 16 + 5 /*FIXME*/,
533  full_mx - 2, full_my - 2,
534  pic_width, pic_height);
535  src_cr = s->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
536  }
537  qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
538  if (!square)
539  qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
540  return;
541  }
542 
543  ysh = 3 - (chroma_idc == 2 /* yuv422 */);
544  if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
545  // chroma offset when predicting from a field of opposite parity
546  my += 2 * ((s->mb_y & 1) - (pic->f.reference - 1));
547  emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
548  }
549 
550  src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
551  (my >> ysh) * h->mb_uvlinesize;
552  src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
553  (my >> ysh) * h->mb_uvlinesize;
554 
555  if (emu) {
557  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
558  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
559  src_cb = s->edge_emu_buffer;
560  }
561  chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
562  height >> (chroma_idc == 1 /* yuv420 */),
563  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
564 
565  if (emu) {
567  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
568  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
569  src_cr = s->edge_emu_buffer;
570  }
571  chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
572  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
573 }
574 
575 static av_always_inline void mc_part_std(H264Context *h, int n, int square,
576  int height, int delta,
577  uint8_t *dest_y, uint8_t *dest_cb,
578  uint8_t *dest_cr,
579  int x_offset, int y_offset,
580  qpel_mc_func *qpix_put,
581  h264_chroma_mc_func chroma_put,
582  qpel_mc_func *qpix_avg,
583  h264_chroma_mc_func chroma_avg,
584  int list0, int list1,
585  int pixel_shift, int chroma_idc)
586 {
587  MpegEncContext *const s = &h->s;
588  qpel_mc_func *qpix_op = qpix_put;
589  h264_chroma_mc_func chroma_op = chroma_put;
590 
591  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
592  if (chroma_idc == 3 /* yuv444 */) {
593  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
594  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
595  } else if (chroma_idc == 2 /* yuv422 */) {
596  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
597  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
598  } else { /* yuv420 */
599  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
600  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
601  }
602  x_offset += 8 * s->mb_x;
603  y_offset += 8 * (s->mb_y >> MB_FIELD);
604 
605  if (list0) {
606  Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
607  mc_dir_part(h, ref, n, square, height, delta, 0,
608  dest_y, dest_cb, dest_cr, x_offset, y_offset,
609  qpix_op, chroma_op, pixel_shift, chroma_idc);
610 
611  qpix_op = qpix_avg;
612  chroma_op = chroma_avg;
613  }
614 
615  if (list1) {
616  Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
617  mc_dir_part(h, ref, n, square, height, delta, 1,
618  dest_y, dest_cb, dest_cr, x_offset, y_offset,
619  qpix_op, chroma_op, pixel_shift, chroma_idc);
620  }
621 }
622 
624  int height, int delta,
625  uint8_t *dest_y, uint8_t *dest_cb,
626  uint8_t *dest_cr,
627  int x_offset, int y_offset,
628  qpel_mc_func *qpix_put,
629  h264_chroma_mc_func chroma_put,
630  h264_weight_func luma_weight_op,
631  h264_weight_func chroma_weight_op,
632  h264_biweight_func luma_weight_avg,
633  h264_biweight_func chroma_weight_avg,
634  int list0, int list1,
635  int pixel_shift, int chroma_idc)
636 {
637  MpegEncContext *const s = &h->s;
638  int chroma_height;
639 
640  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
641  if (chroma_idc == 3 /* yuv444 */) {
642  chroma_height = height;
643  chroma_weight_avg = luma_weight_avg;
644  chroma_weight_op = luma_weight_op;
645  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
646  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
647  } else if (chroma_idc == 2 /* yuv422 */) {
648  chroma_height = height;
649  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
650  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
651  } else { /* yuv420 */
652  chroma_height = height >> 1;
653  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
654  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
655  }
656  x_offset += 8 * s->mb_x;
657  y_offset += 8 * (s->mb_y >> MB_FIELD);
658 
659  if (list0 && list1) {
660  /* don't optimize for luma-only case, since B-frames usually
661  * use implicit weights => chroma too. */
662  uint8_t *tmp_cb = h->bipred_scratchpad;
663  uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
664  uint8_t *tmp_y = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
665  int refn0 = h->ref_cache[0][scan8[n]];
666  int refn1 = h->ref_cache[1][scan8[n]];
667 
668  mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
669  dest_y, dest_cb, dest_cr,
670  x_offset, y_offset, qpix_put, chroma_put,
671  pixel_shift, chroma_idc);
672  mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
673  tmp_y, tmp_cb, tmp_cr,
674  x_offset, y_offset, qpix_put, chroma_put,
675  pixel_shift, chroma_idc);
676 
677  if (h->use_weight == 2) {
678  int weight0 = h->implicit_weight[refn0][refn1][s->mb_y & 1];
679  int weight1 = 64 - weight0;
680  luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
681  height, 5, weight0, weight1, 0);
682  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
683  chroma_height, 5, weight0, weight1, 0);
684  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
685  chroma_height, 5, weight0, weight1, 0);
686  } else {
687  luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
689  h->luma_weight[refn0][0][0],
690  h->luma_weight[refn1][1][0],
691  h->luma_weight[refn0][0][1] +
692  h->luma_weight[refn1][1][1]);
693  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
695  h->chroma_weight[refn0][0][0][0],
696  h->chroma_weight[refn1][1][0][0],
697  h->chroma_weight[refn0][0][0][1] +
698  h->chroma_weight[refn1][1][0][1]);
699  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
701  h->chroma_weight[refn0][0][1][0],
702  h->chroma_weight[refn1][1][1][0],
703  h->chroma_weight[refn0][0][1][1] +
704  h->chroma_weight[refn1][1][1][1]);
705  }
706  } else {
707  int list = list1 ? 1 : 0;
708  int refn = h->ref_cache[list][scan8[n]];
709  Picture *ref = &h->ref_list[list][refn];
710  mc_dir_part(h, ref, n, square, height, delta, list,
711  dest_y, dest_cb, dest_cr, x_offset, y_offset,
712  qpix_put, chroma_put, pixel_shift, chroma_idc);
713 
714  luma_weight_op(dest_y, h->mb_linesize, height,
716  h->luma_weight[refn][list][0],
717  h->luma_weight[refn][list][1]);
718  if (h->use_weight_chroma) {
719  chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
721  h->chroma_weight[refn][list][0][0],
722  h->chroma_weight[refn][list][0][1]);
723  chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
725  h->chroma_weight[refn][list][1][0],
726  h->chroma_weight[refn][list][1][1]);
727  }
728  }
729 }
730 
732  int pixel_shift, int chroma_idc)
733 {
734  /* fetch pixels for estimated mv 4 macroblocks ahead
735  * optimized for 64byte cache lines */
736  MpegEncContext *const s = &h->s;
737  const int refn = h->ref_cache[list][scan8[0]];
738  if (refn >= 0) {
739  const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * s->mb_x + 8;
740  const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * s->mb_y;
741  uint8_t **src = h->ref_list[list][refn].f.data;
742  int off = (mx << pixel_shift) +
743  (my + (s->mb_x & 3) * 4) * h->mb_linesize +
744  (64 << pixel_shift);
745  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
746  if (chroma_idc == 3 /* yuv444 */) {
747  s->vdsp.prefetch(src[1] + off, s->linesize, 4);
748  s->vdsp.prefetch(src[2] + off, s->linesize, 4);
749  } else {
750  off = ((mx >> 1) << pixel_shift) +
751  ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize +
752  (64 << pixel_shift);
753  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
754  }
755  }
756 }
757 
/**
 * Free the per-context H.264 lookup tables and, optionally, the RBSP
 * escape buffers of every slice-thread context.
 *
 * NOTE(review): this dump dropped original lines 763-764, 769-770 and 783,
 * so a few av_freep() statements are missing from this listing.
 *
 * @param free_rbsp if nonzero, also release each thread's rbsp_buffer[]
 */
758 static void free_tables(H264Context *h, int free_rbsp)
759 {
760  int i;
761  H264Context *hx;
762 
765  av_freep(&h->cbp_table);
766  av_freep(&h->mvd_table[0]);
767  av_freep(&h->mvd_table[1]);
768  av_freep(&h->direct_table);
771  h->slice_table = NULL;
772  av_freep(&h->list_counts);
773 
774  av_freep(&h->mb2b_xy);
775  av_freep(&h->mb2br_xy);
776 
 /* thread_context[0] aliases h itself, hence the "if (i)" guard below. */
777  for (i = 0; i < MAX_THREADS; i++) {
778  hx = h->thread_context[i];
779  if (!hx)
780  continue;
781  av_freep(&hx->top_borders[1]);
782  av_freep(&hx->top_borders[0]);
784  if (free_rbsp) {
785  av_freep(&hx->rbsp_buffer[1]);
786  av_freep(&hx->rbsp_buffer[0]);
787  hx->rbsp_buffer_size[0] = 0;
788  hx->rbsp_buffer_size[1] = 0;
789  }
790  if (i)
791  av_freep(&h->thread_context[i]);
792  }
793 }
794 
796 {
797  int i, j, q, x;
798  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
799 
800  for (i = 0; i < 6; i++) {
801  h->dequant8_coeff[i] = h->dequant8_buffer[i];
802  for (j = 0; j < i; j++)
803  if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
804  64 * sizeof(uint8_t))) {
805  h->dequant8_coeff[i] = h->dequant8_buffer[j];
806  break;
807  }
808  if (j < i)
809  continue;
810 
811  for (q = 0; q < max_qp + 1; q++) {
812  int shift = div6[q];
813  int idx = rem6[q];
814  for (x = 0; x < 64; x++)
815  h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
816  ((uint32_t)dequant8_coeff_init[idx][dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
817  h->pps.scaling_matrix8[i][x]) << shift;
818  }
819  }
820 }
821 
823 {
824  int i, j, q, x;
825  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
826  for (i = 0; i < 6; i++) {
827  h->dequant4_coeff[i] = h->dequant4_buffer[i];
828  for (j = 0; j < i; j++)
829  if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
830  16 * sizeof(uint8_t))) {
831  h->dequant4_coeff[i] = h->dequant4_buffer[j];
832  break;
833  }
834  if (j < i)
835  continue;
836 
837  for (q = 0; q < max_qp + 1; q++) {
838  int shift = div6[q] + 2;
839  int idx = rem6[q];
840  for (x = 0; x < 16; x++)
841  h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
842  ((uint32_t)dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
843  h->pps.scaling_matrix4[i][x]) << shift;
844  }
845  }
846 }
847 
849 {
850  int i, x;
852  if (h->pps.transform_8x8_mode)
854  if (h->sps.transform_bypass) {
855  for (i = 0; i < 6; i++)
856  for (x = 0; x < 16; x++)
857  h->dequant4_coeff[i][0][x] = 1 << 6;
859  for (i = 0; i < 6; i++)
860  for (x = 0; x < 64; x++)
861  h->dequant8_coeff[i][0][x] = 1 << 6;
862  }
863 }
864 
866 {
867  MpegEncContext *const s = &h->s;
868  const int big_mb_num = s->mb_stride * (s->mb_height + 1);
869  const int row_mb_num = s->mb_stride * 2 * s->avctx->thread_count;
870  int x, y;
871 
873  row_mb_num * 8 * sizeof(uint8_t), fail)
875  big_mb_num * 48 * sizeof(uint8_t), fail)
877  (big_mb_num + s->mb_stride) * sizeof(*h->slice_table_base), fail)
879  big_mb_num * sizeof(uint16_t), fail)
881  big_mb_num * sizeof(uint8_t), fail)
883  16 * row_mb_num * sizeof(uint8_t), fail);
885  16 * row_mb_num * sizeof(uint8_t), fail);
887  4 * big_mb_num * sizeof(uint8_t), fail);
889  big_mb_num * sizeof(uint8_t), fail)
890 
891  memset(h->slice_table_base, -1,
892  (big_mb_num + s->mb_stride) * sizeof(*h->slice_table_base));
893  h->slice_table = h->slice_table_base + s->mb_stride * 2 + 1;
894 
896  big_mb_num * sizeof(uint32_t), fail);
898  big_mb_num * sizeof(uint32_t), fail);
899  for (y = 0; y < s->mb_height; y++)
900  for (x = 0; x < s->mb_width; x++) {
901  const int mb_xy = x + y * s->mb_stride;
902  const int b_xy = 4 * x + 4 * y * h->b_stride;
903 
904  h->mb2b_xy[mb_xy] = b_xy;
905  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * s->mb_stride)));
906  }
907 
908  if (!h->dequant4_coeff[0])
910 
911  return 0;
912 
913 fail:
914  free_tables(h, 1);
915  return -1;
916 }
917 
/**
 * Point a slice-thread context at the main context's lookup tables.
 * Big per-MB tables are shared by pointer; the per-row caches
 * (intra4x4_pred_mode, mvd_table) get a per-thread offset of
 * i * 8 * 2 * mb_stride entries.
 *
 * NOTE(review): original line 930 is missing from this dump.
 *
 * @param i index of the slice-thread context being initialized
 */
921 static void clone_tables(H264Context *dst, H264Context *src, int i)
922 {
923  MpegEncContext *const s = &src->s;
924  dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * s->mb_stride;
925  dst->non_zero_count = src->non_zero_count;
926  dst->slice_table = src->slice_table;
927  dst->cbp_table = src->cbp_table;
928  dst->mb2b_xy = src->mb2b_xy;
929  dst->mb2br_xy = src->mb2br_xy;
931  dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * s->mb_stride;
932  dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * s->mb_stride;
933  dst->direct_table = src->direct_table;
934  dst->list_counts = src->list_counts;
935  dst->bipred_scratchpad = NULL;
 /* Prediction function pointers depend on bit depth and chroma format. */
936  ff_h264_pred_init(&dst->hpc, src->s.codec_id, src->sps.bit_depth_luma,
937  src->sps.chroma_format_idc);
938 }
939 
/**
 * Per-context initialization: allocate scratch buffers and mark the
 * ref-cache slots that can never hold a real partition.
 *
 * NOTE(review): the dump dropped original lines 946 and 948, which held
 * the FF_ALLOCZ-style allocation statements these size expressions
 * belong to.
 *
 * @return 0 on success, -1 on allocation failure
 */
944 static int context_init(H264Context *h)
945 {
947  h->s.mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
949  h->s.mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
950 
 /* These cache positions never map to a real sub-block. */
951  h->ref_cache[0][scan8[5] + 1] =
952  h->ref_cache[0][scan8[7] + 1] =
953  h->ref_cache[0][scan8[13] + 1] =
954  h->ref_cache[1][scan8[5] + 1] =
955  h->ref_cache[1][scan8[7] + 1] =
956  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
957 
958  return 0;
959 
960 fail:
961  return -1; // free_tables will clean up for us
962 }
963 
964 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
965  int parse_extradata);
966 
968 {
969  MpegEncContext *const s = &h->s;
970 
971  s->width = s->avctx->width;
972  s->height = s->avctx->height;
973  s->codec_id = s->avctx->codec->id;
974 
975  ff_h264dsp_init(&h->h264dsp, 8, 1);
976  ff_h264_pred_init(&h->hpc, s->codec_id, 8, 1);
977 
978  h->dequant_coeff_pps = -1;
979  s->unrestricted_mv = 1;
980 
981  /* needed so that IDCT permutation is known early */
982  ff_dsputil_init(&s->dsp, s->avctx);
983  ff_videodsp_init(&s->vdsp, 8);
984 
985  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
986  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
987 }
988 
990 {
991  AVCodecContext *avctx = h->s.avctx;
992 
993  if (avctx->extradata[0] == 1) {
994  int i, cnt, nalsize;
995  unsigned char *p = avctx->extradata;
996 
997  h->is_avc = 1;
998 
999  if (avctx->extradata_size < 7) {
1000  av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
1001  return -1;
1002  }
1003  /* sps and pps in the avcC always have length coded with 2 bytes,
1004  * so put a fake nal_length_size = 2 while parsing them */
1005  h->nal_length_size = 2;
1006  // Decode sps from avcC
1007  cnt = *(p + 5) & 0x1f; // Number of sps
1008  p += 6;
1009  for (i = 0; i < cnt; i++) {
1010  nalsize = AV_RB16(p) + 2;
1011  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1012  return -1;
1013  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1014  av_log(avctx, AV_LOG_ERROR,
1015  "Decoding sps %d from avcC failed\n", i);
1016  return -1;
1017  }
1018  p += nalsize;
1019  }
1020  // Decode pps from avcC
1021  cnt = *(p++); // Number of pps
1022  for (i = 0; i < cnt; i++) {
1023  nalsize = AV_RB16(p) + 2;
1024  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1025  return -1;
1026  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1027  av_log(avctx, AV_LOG_ERROR,
1028  "Decoding pps %d from avcC failed\n", i);
1029  return -1;
1030  }
1031  p += nalsize;
1032  }
1033  // Now store right nal length size, that will be used to parse all other nals
1034  h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
1035  } else {
1036  h->is_avc = 0;
1037  if (decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1) < 0)
1038  return -1;
1039  }
1040  return 0;
1041 }
1042 
1044 {
1045  H264Context *h = avctx->priv_data;
1046  MpegEncContext *const s = &h->s;
1047  int i;
1048 
1050 
1051  s->avctx = avctx;
1052  common_init(h);
1053 
1054  s->out_format = FMT_H264;
1055  s->workaround_bugs = avctx->workaround_bugs;
1056 
1057  /* set defaults */
1058  // s->decode_mb = ff_h263_decode_mb;
1059  s->quarter_sample = 1;
1060  if (!avctx->has_b_frames)
1061  s->low_delay = 1;
1062 
1064 
1066 
1067  h->pixel_shift = 0;
1068  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
1069 
1070  h->thread_context[0] = h;
1071  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1072  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1073  h->last_pocs[i] = INT_MIN;
1074  h->prev_poc_msb = 1 << 16;
1075  h->x264_build = -1;
1076  ff_h264_reset_sei(h);
1077  if (avctx->codec_id == AV_CODEC_ID_H264) {
1078  if (avctx->ticks_per_frame == 1)
1079  s->avctx->time_base.den *= 2;
1080  avctx->ticks_per_frame = 2;
1081  }
1082 
1083  if (avctx->extradata_size > 0 && avctx->extradata &&
1085  return -1;
1086 
1090  s->low_delay = 0;
1091  }
1092 
1093  return 0;
1094 }
1095 
/* True iff pointer (a) lies inside the (size)-byte object starting at (b);
 * used below to validate pointers before rebasing them onto a new context. */
1096 #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
1097 
1098 static void copy_picture_range(Picture **to, Picture **from, int count,
1099  MpegEncContext *new_base,
1100  MpegEncContext *old_base)
1101 {
1102  int i;
1103 
1104  for (i = 0; i < count; i++) {
1105  assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1106  IN_RANGE(from[i], old_base->picture,
1107  sizeof(Picture) * old_base->picture_count) ||
1108  !from[i]));
1109  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
1110  }
1111 }
1112 
/**
 * Synchronize an array of heap-allocated parameter sets (SPS/PPS) so that
 * `to` mirrors `from`: entries present only in `to` are freed, entries
 * present only in `from` are allocated, and present entries are copied.
 *
 * @param to    destination pointer array (owned by this context)
 * @param from  source pointer array (not modified)
 * @param count number of entries in both arrays
 * @param size  byte size of one parameter set structure
 *
 * Fix: the av_malloc() result was previously used unchecked — on allocation
 * failure the memcpy() below would write through a NULL pointer. The copy is
 * now guarded on to[i]; a failed allocation leaves that slot NULL, matching
 * the "parameter set absent" state the rest of the code already handles.
 */
static void copy_parameter_set(void **to, void **from, int count, int size)
{
    int i;

    for (i = 0; i < count; i++) {
        if (to[i] && !from[i])
            av_freep(&to[i]);
        else if (from[i] && !to[i])
            to[i] = av_malloc(size);

        /* Only copy when both source exists and allocation succeeded. */
        if (from[i] && to[i])
            memcpy(to[i], from[i], size);
    }
}
1127 
/* NOTE(review): signature elided by extraction — per upstream this is
 * decode_init_thread_copy(AVCodecContext *avctx), run once per frame-thread
 * copy of the decoder context; confirm. */
{
    H264Context *h = avctx->priv_data;

    /* Only per-thread copies need fixing up; the master context keeps its
     * state. */
    if (!avctx->internal->is_copy)
        return 0;
    /* The SPS/PPS pointers were shallow-copied from the source thread and
     * are owned there; clear them so this thread neither frees nor reuses
     * another thread's buffers. They are re-synced on each update. */
    memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
    memset(h->pps_buffers, 0, sizeof(h->pps_buffers));

    /* Force full (re)initialization on first use in this thread. */
    h->s.context_initialized = 0;

    return 0;
}
1141 
1142 #define copy_fields(to, from, start_field, end_field) \
1143  memcpy(&to->start_field, &from->start_field, \
1144  (char *)&to->end_field - (char *)&to->start_field)
1145 
1146 static int h264_slice_header_init(H264Context *, int);
1147 
1149 
/* NOTE(review): first signature line elided by extraction — per upstream this
 * is static int decode_update_thread_context(AVCodecContext *dst,
 * const AVCodecContext *src); confirm. Syncs a frame-thread decoder context
 * from the thread that just finished setup: dimensions, parameter sets,
 * dequant tables, POC state and reference lists. */
                                        const AVCodecContext *src)
{
    H264Context *h = dst->priv_data, *h1 = src->priv_data;
    MpegEncContext *const s = &h->s, *const s1 = &h1->s;
    int inited = s->context_initialized, err;
    int i;

    /* Nothing to do when syncing with self or from an uninitialized source. */
    if (dst == src || !s1->context_initialized)
        return 0;

    /* Re-init on any geometry / bit depth / chroma format change. */
    if (inited &&
        (s->width                 != s1->width                 ||
         s->height                != s1->height                ||
         s->mb_width              != s1->mb_width              ||
         s->mb_height             != s1->mb_height             ||
         h->sps.bit_depth_luma    != h1->sps.bit_depth_luma    ||
         h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
         h->sps.colorspace        != h1->sps.colorspace)) {

        /* NOTE(review): elided call here (doxygen 1170) — presumably frees
         * the old per-context tables before re-init; confirm. */

        s->width     = s1->width;
        s->height    = s1->height;
        s->mb_height = s1->mb_height;
        h->b_stride  = h1->b_stride;

        if ((err = h264_slice_header_init(h, 1)) < 0) {
            av_log(h->s.avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
            return err;
        }
        h->context_reinitialized = 1;

        /* update linesize on resize for h264. The h264 decoder doesn't
         * necessarily call ff_MPV_frame_start in the new thread */
        s->linesize   = s1->linesize;
        s->uvlinesize = s1->uvlinesize;

        /* copy block_offset since frame_start may not be called */
        memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
        /* NOTE(review): elided line here (doxygen 1190); confirm. */
    }

    err = ff_mpeg_update_thread_context(dst, src);
    if (err)
        return err;

    if (!inited) {
        /* First sync into this thread: drop any stale parameter sets, bulk
         * copy everything after the embedded MpegEncContext, then fix up all
         * pointers that must not be shared between threads. */
        for (i = 0; i < MAX_SPS_COUNT; i++)
            av_freep(h->sps_buffers + i);

        for (i = 0; i < MAX_PPS_COUNT; i++)
            av_freep(h->pps_buffers + i);

        // copy all fields after MpegEnc
        memcpy(&h->s + 1, &h1->s + 1,
               sizeof(H264Context) - sizeof(MpegEncContext));
        /* The bulk memcpy copied h1's pointers — clear the owned ones. */
        memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
        memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
        if (ff_h264_alloc_tables(h) < 0) {
            av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
            return AVERROR(ENOMEM);
        }
        context_init(h);

        for (i = 0; i < 2; i++) {
            h->rbsp_buffer[i]      = NULL;
            h->rbsp_buffer_size[i] = 0;
        }
        h->bipred_scratchpad = NULL;

        h->thread_context[0] = h;

        s->dsp.clear_blocks(h->mb);
        s->dsp.clear_blocks(h->mb + (24 * 16 << h->pixel_shift));
    }

    /* frame_start may not be called for the next thread (if it's decoding
     * a bottom field) so this has to be allocated here */
    if (!h->bipred_scratchpad)
        h->bipred_scratchpad = av_malloc(16 * 6 * s->linesize);

    // extradata/NAL handling
    h->is_avc = h1->is_avc;

    // SPS/PPS
    copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
                       MAX_SPS_COUNT, sizeof(SPS));
    h->sps = h1->sps;
    copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
                       MAX_PPS_COUNT, sizeof(PPS));
    h->pps = h1->pps;

    // Dequantization matrices
    // FIXME these are big - can they be only copied when PPS changes?
    copy_fields(h, h1, dequant4_buffer, dequant4_coeff);

    /* Rebase the dequant coefficient pointers relative to this context's
     * buffers (they may alias within the buffer for shared matrices). */
    for (i = 0; i < 6; i++)
        h->dequant4_coeff[i] = h->dequant4_buffer[0] +
                               (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);

    for (i = 0; i < 6; i++)
        h->dequant8_coeff[i] = h->dequant8_buffer[0] +
                               (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);

    h->dequant_coeff_pps = h1->dequant_coeff_pps;

    // POC timing
    copy_fields(h, h1, poc_lsb, redundant_pic_count);

    // reference lists
    copy_fields(h, h1, ref_count, list_count);
    copy_fields(h, h1, ref_list, intra_gb);
    copy_fields(h, h1, short_ref, cabac_init_idc);

    /* Translate Picture pointers from h1's picture storage into h's. */
    copy_picture_range(h->short_ref, h1->short_ref, 32, s, s1);
    copy_picture_range(h->long_ref, h1->long_ref, 32, s, s1);
    copy_picture_range(h->delayed_pic, h1->delayed_pic,
                       MAX_DELAYED_PIC_COUNT + 2, s, s1);

    h->last_slice_type = h1->last_slice_type;

    if (!s->current_picture_ptr)
        return 0;

    if (!s->droppable) {
        /* NOTE(review): elided line here (doxygen 1276) — presumably runs
         * reference-marking (MMCO) on the synced picture; confirm. */
        h->prev_poc_msb = h->poc_msb;
        h->prev_poc_lsb = h->poc_lsb;
    }
    /* NOTE(review): elided line here (doxygen 1280) — likely
     * prev_frame_num_offset sync; confirm. */
    h->prev_frame_num = h->frame_num;
    /* NOTE(review): elided line(s) here (doxygen 1282); confirm. */

    return err;
}
1286 
/* NOTE(review): signature elided by extraction — per upstream this is
 * static int h264_frame_start(H264Context *h); confirm. Per-frame setup:
 * starts the MPV frame, precomputes block offsets, allocates per-slice
 * scratchpads and invalidates the slice table. Returns 0 or -1. */
{
    MpegEncContext *const s = &h->s;
    int i;
    const int pixel_shift = h->pixel_shift;

    if (ff_MPV_frame_start(s, s->avctx) < 0)
        return -1;
    ff_er_frame_start(s);
    /*
     * ff_MPV_frame_start uses pict_type to derive key_frame.
     * This is incorrect for H.264; IDR markings must be used.
     * Zero here; IDR markings per slice in frame or fields are ORed in later.
     * See decode_nal_units().
     */
    /* NOTE(review): elided statement here (doxygen 1302) — presumably clears
     * key_frame on the current picture per the comment above; confirm. */

    assert(s->linesize && s->uvlinesize);

    /* Precompute per-4x4-block pixel offsets. Entries 0-15: luma;
     * entries 48+: MBAFF variants using a doubled line stride. */
    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->linesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->linesize * ((scan8[i] - scan8[0]) >> 3);
    }
    /* Chroma offsets: the two chroma planes share one table (16-31, 32-47). */
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
    }

    /* can't be in alloc_tables because linesize isn't known there.
     * FIXME: redo bipred weight to not require extra buffer? */
    for (i = 0; i < s->slice_context_count; i++)
        if (h->thread_context[i] && !h->thread_context[i]->bipred_scratchpad)
            h->thread_context[i]->bipred_scratchpad = av_malloc(16 * 6 * s->linesize);

    /* Some macroblocks can be accessed before they're available in case
     * of lost slices, MBAFF or threading. */
    memset(h->slice_table, -1,
           (s->mb_height * s->mb_stride - 1) * sizeof(*h->slice_table));

    // s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding ||
    //             s->current_picture.f.reference /* || h->contains_intra */ || 1;

    /* We mark the current picture as non-reference after allocating it, so
     * that if we break out due to an error it can be released automatically
     * in the next ff_MPV_frame_start().
     * SVQ3 as well as most other codecs have only last/next/current and thus
     * get released even with set reference, besides SVQ3 and others do not
     * mark frames as reference later "naturally". */
    if (s->codec_id != AV_CODEC_ID_SVQ3)
        /* NOTE(review): elided statement here (doxygen 1339) — presumably
         * clears the reference flag on the current picture; confirm. */

    /* NOTE(review): elided line here (doxygen 1341) — presumably the
     * field_poc[0] = INT_MAX assignment chained with the next line; confirm. */
    s->current_picture_ptr->field_poc[1] = INT_MAX;

    h->next_output_pic = NULL;

    assert(s->current_picture_ptr->long_ref == 0);

    return 0;
}
1350 
/**
 * Post-slice-header setup: derives interlacing/repeat flags for the current
 * picture and sorts it into the delayed-picture queue for display-order
 * output (B-frame reordering, MMCO reset and POC-gap handling).
 * NOTE(review): several linkified lines were dropped by extraction; each gap
 * is marked inline below — restore from upstream h264.c before relying on
 * this text.
 */
static void decode_postinit(H264Context *h, int setup_finished)
{
    MpegEncContext *const s = &h->s;
    Picture *out = s->current_picture_ptr;
    Picture *cur = s->current_picture_ptr;
    int i, pics, out_of_order, out_idx;
    int invalid = 0, cnt = 0;

    /* NOTE(review): elided lines here (doxygen 1367-1368) — per upstream,
     * qscale_type / pict_type setup on the current picture; confirm. */

    if (h->next_output_pic)
        return;

    if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
        /* FIXME: if we have two PAFF fields in one packet, we can't start
         * the next thread here. If we have one field per packet, we can.
         * The check in decode_nal_units() is not good enough to find this
         * yet, so we assume the worst for now. */
        // if (setup_finished)
        //     ff_thread_finish_setup(s->avctx);
        return;
    }

    cur->f.interlaced_frame = 0;
    cur->f.repeat_pict      = 0;

    /* Signal interlacing information externally. */
    /* Prioritize picture timing SEI information over used
     * decoding process if it exists. */

    if (h->sps.pic_struct_present_flag) {
        switch (h->sei_pic_struct) {
        case SEI_PIC_STRUCT_FRAME:
            break;
        /* NOTE(review): elided case labels here (doxygen 1394-1395) —
         * the two single-field pic_struct values; confirm. */
            cur->f.interlaced_frame = 1;
            break;
        /* NOTE(review): elided case labels + condition here (doxygen
         * 1398-1400); confirm. */
            cur->f.interlaced_frame = 1;
        else
            // try to flag soft telecine progressive
            /* NOTE(review): elided assignment here (doxygen 1404); confirm. */
            break;
        /* NOTE(review): elided case labels here (doxygen 1406-1407); confirm. */
            /* Signal the possibility of telecined film externally
             * (pic_struct 5,6). From these hints, let the applications
             * decide if they apply deinterlacing. */
            cur->f.repeat_pict = 1;
            break;
        /* NOTE(review): elided case label here (doxygen 1413, frame
         * doubling); confirm. */
            // Force progressive here, doubling interlaced frame is a bad idea.
            cur->f.repeat_pict = 2;
            break;
        /* NOTE(review): elided case label here (doxygen 1417, frame
         * tripling); confirm. */
            cur->f.repeat_pict = 4;
            break;
        }

        if ((h->sei_ct_type & 3) &&
            /* NOTE(review): condition tail elided (doxygen 1423); confirm. */
            cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
    } else {
        /* Derive interlacing flag from used decoding process. */
        /* NOTE(review): elided assignment here (doxygen 1427); confirm. */
    }
    /* NOTE(review): elided line here (doxygen 1429) — presumably stores
     * prev_interlaced_frame; confirm. */

    if (cur->field_poc[0] != cur->field_poc[1]) {
        /* Derive top_field_first from field pocs. */
        cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
    } else {
        if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
            /* Use picture timing SEI information. Even if it is a
             * information of a past frame, better than nothing. */
            /* NOTE(review): elided condition here (doxygen 1438-1439);
             * confirm. */
                cur->f.top_field_first = 1;
            else
                cur->f.top_field_first = 0;
        } else {
            /* Most likely progressive */
            cur->f.top_field_first = 0;
        }
    }

    // FIXME do something with unavailable reference frames

    /* Sort B-frames into display order */

    /* NOTE(review): elided condition block here (doxygen 1453-1455);
     * confirm. */
        s->low_delay = 0;
    }

    /* NOTE(review): elided condition block here (doxygen 1459-1461);
     * confirm. */
        s->low_delay = 0;
    }

    /* Count currently queued delayed pictures. */
    pics = 0;
    while (h->delayed_pic[pics])
        pics++;

    assert(pics <= MAX_DELAYED_PIC_COUNT);

    h->delayed_pic[pics++] = cur;
    if (cur->f.reference == 0)
        cur->f.reference = DELAYED_PIC_REF;

    /* Frame reordering. This code takes pictures from coding order and sorts
     * them by their incremental POC value into display order. It supports POC
     * gaps, MMCO reset codes and random resets.
     * A "display group" can start either with a IDR frame (f.key_frame = 1),
     * and/or can be closed down with a MMCO reset code. In sequences where
     * there is no delay, we can't detect that (since the frame was already
     * output to the user), so we also set h->mmco_reset to detect the MMCO
     * reset code.
     * FIXME: if we detect insufficient delays (as per s->avctx->has_b_frames),
     * we increase the delay between input and output. All frames affected by
     * the lag (e.g. those that should have been output before another frame
     * that we already returned to the user) will be dropped. This is a bug
     * that we will fix later. */
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
        cnt     += out->poc < h->last_pocs[i];
        invalid += out->poc == INT_MIN;
    }
    if (!h->mmco_reset && !cur->f.key_frame &&
        cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
        h->mmco_reset = 2;
        if (pics > 1)
            h->delayed_pic[pics - 2]->mmco_reset = 2;
    }
    if (h->mmco_reset || cur->f.key_frame) {
        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
            h->last_pocs[i] = INT_MIN;
        cnt     = 0;
        invalid = MAX_DELAYED_PIC_COUNT;
    }
    /* Pick the queued picture with the smallest POC up to the next
     * key frame / MMCO reset boundary. */
    out     = h->delayed_pic[0];
    out_idx = 0;
    for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
                h->delayed_pic[i] &&
                !h->delayed_pic[i - 1]->mmco_reset &&
                !h->delayed_pic[i]->f.key_frame;
         i++)
        if (h->delayed_pic[i]->poc < out->poc) {
            out     = h->delayed_pic[i];
            out_idx = i;
        }
    if (s->avctx->has_b_frames == 0 &&
        (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
        h->next_outputed_poc = INT_MIN;
    out_of_order = !out->f.key_frame && !h->mmco_reset &&
                   (out->poc < h->next_outputed_poc);

    /* NOTE(review): elided condition block here (doxygen 1521-1522) —
     * first branch of this if/else chain; confirm. */
    } else if (out_of_order && pics - 1 == s->avctx->has_b_frames &&
               s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
        if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
            s->avctx->has_b_frames = FFMAX(s->avctx->has_b_frames, cnt);
        }
        s->low_delay = 0;
    } else if (s->low_delay &&
               ((h->next_outputed_poc != INT_MIN &&
                 out->poc > h->next_outputed_poc + 2) ||
                cur->f.pict_type == AV_PICTURE_TYPE_B)) {
        /* Delay detected at runtime: grow the reorder buffer. */
        s->low_delay = 0;
        s->avctx->has_b_frames++;
    }

    if (pics > s->avctx->has_b_frames) {
        out->f.reference &= ~DELAYED_PIC_REF;
        // for frame threading, the owner must be the second field's thread or
        // else the first thread can release the picture and reuse it unsafely
        out->owner2 = s;
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];
    }
    memmove(h->last_pocs, &h->last_pocs[1],
            sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
    h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
    if (!out_of_order && pics > s->avctx->has_b_frames) {
        h->next_output_pic = out;
        if (out->mmco_reset) {
            if (out_idx > 0) {
                h->next_outputed_poc                    = out->poc;
                h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
            } else {
                h->next_outputed_poc = INT_MIN;
            }
        } else {
            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
                h->next_outputed_poc = INT_MIN;
            } else {
                h->next_outputed_poc = out->poc;
            }
        }
        h->mmco_reset = 0;
    } else {
        av_log(s->avctx, AV_LOG_DEBUG, "no picture\n");
    }

    if (setup_finished)
        /* NOTE(review): elided call here (doxygen 1570) — presumably
         * ff_thread_finish_setup(s->avctx); confirm. */
}
1572 
/* NOTE(review): first signature line elided by extraction — per upstream this
 * is static av_always_inline void backup_mb_border(H264Context *h,
 * uint8_t *src_y, ...); confirm. Saves the bottom edge of the current
 * macroblock into h->top_borders so the macroblock row below can still use
 * it for intra prediction/deblocking after this row is overwritten. */
                                     uint8_t *src_cb, uint8_t *src_cr,
                                     int linesize, int uvlinesize,
                                     int simple)
{
    MpegEncContext *const s = &h->s;
    uint8_t *top_border;
    int top_idx = 1;
    const int pixel_shift = h->pixel_shift;
    int chroma444 = CHROMA444;
    int chroma422 = CHROMA422;

    /* Step back one line so "row 15/16" indexing below lands on the last
     * line(s) of this macroblock. */
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    if (!simple && FRAME_MBAFF) {
        if (s->mb_y & 1) {
            /* Bottom MB of a pair: for non-MBAFF-coded pairs also back up the
             * next-to-last line into the "top" slot (index 0). */
            if (!MB_MBAFF) {
                top_border = h->top_borders[0][s->mb_x];
                AV_COPY128(top_border, src_y + 15 * linesize);
                if (pixel_shift)  /* >8-bit: rows are 32 bytes wide */
                    AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
                if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
                    if (chroma444) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
                            AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
                            AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
                        } else {
                            AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
                        }
                    } else if (chroma422) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
                        }
                    } else {
                        /* 4:2:0: chroma blocks are 8 lines tall. */
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
                        }
                    }
                }
            }
        } else if (MB_MBAFF) {
            top_idx = 0;
        } else
            return;
    }

    top_border = h->top_borders[top_idx][s->mb_x];
    /* There are two lines saved, the line above the top macroblock
     * of a pair, and the line above the bottom macroblock. */
    AV_COPY128(top_border, src_y + 16 * linesize);
    if (pixel_shift)
        AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);

    if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * linesize);
                AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
                AV_COPY128(top_border + 64, src_cr + 16 * linesize);
                AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
            } else {
                AV_COPY128(top_border + 16, src_cb + 16 * linesize);
                AV_COPY128(top_border + 32, src_cr + 16 * linesize);
            }
        } else if (chroma422) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
            }
        } else {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
            }
        }
    }
}
1669 
/* NOTE(review): first signature line elided by extraction — per upstream this
 * is static av_always_inline void xchg_mb_border(H264Context *h,
 * uint8_t *src_y, ...); confirm. Swaps (or copies, depending on xchg) the
 * saved top-border lines with the pixels above the current macroblock so
 * intra prediction can use undeblocked neighbours; called once before and
 * once after prediction to restore the deblocked data. */
                                  uint8_t *src_cb, uint8_t *src_cr,
                                  int linesize, int uvlinesize,
                                  int xchg, int chroma444,
                                  int simple, int pixel_shift)
{
    MpegEncContext *const s = &h->s;
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1;
    uint8_t *top_border;

    if (!simple && FRAME_MBAFF) {
        if (s->mb_y & 1) {
            /* Bottom MB of a non-MBAFF pair: nothing to exchange. */
            if (!MB_MBAFF)
                return;
        } else {
            top_idx = MB_MBAFF ? 0 : 1;
        }
    }

    if (h->deblocking_filter == 2) {
        /* Filter across slice boundaries disabled: only exchange when the
         * neighbour belongs to the same slice. */
        deblock_topleft = h->slice_table[h->mb_xy - 1 - s->mb_stride] == h->slice_num;
        deblock_top     = h->top_type;
    } else {
        deblock_topleft = (s->mb_x > 0);
        deblock_top     = (s->mb_y > !!MB_FIELD);
    }

    /* Point one line up and one pixel left of the macroblock. */
    src_y  -= linesize   + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = h->top_borders[top_idx][s->mb_x - 1];
    top_border    = h->top_borders[top_idx][s->mb_x];

/* Swap or copy 8 pixels (16 bytes for >8-bit) between border buffer and
 * frame; the third argument selects swap vs. one-way copy. */
#define XCHG(a, b, xchg)                        \
    if (pixel_shift) {                          \
        if (xchg) {                             \
            AV_SWAP64(b + 0, a + 0);            \
            AV_SWAP64(b + 8, a + 8);            \
        } else {                                \
            AV_COPY128(b, a);                   \
        }                                       \
    } else if (xchg)                            \
        AV_SWAP64(b, a);                        \
    else                                        \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (s->mb_x + 1 < s->mb_width) {
            XCHG(h->top_borders[top_idx][s->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
    }
    if (simple || !CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (deblock_topleft) {
                XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
            }
            XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
            XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
            XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
            XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
            if (s->mb_x + 1 < s->mb_width) {
                XCHG(h->top_borders[top_idx][s->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                XCHG(h->top_borders[top_idx][s->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
            }
        } else {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}
1758 
1759 static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth,
1760  int index)
1761 {
1762  if (high_bit_depth) {
1763  return AV_RN32A(((int32_t *)mb) + index);
1764  } else
1765  return AV_RN16A(mb + index);
1766 }
1767 
1768 static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth,
1769  int index, int value)
1770 {
1771  if (high_bit_depth) {
1772  AV_WN32A(((int32_t *)mb) + index, value);
1773  } else
1774  AV_WN16A(mb + index, value);
1775 }
1776 
/* NOTE(review): first signature line elided by extraction — per upstream this
 * is static av_always_inline void hl_decode_mb_predict_luma(H264Context *h,
 * ...); confirm. Runs intra prediction and adds the residual for one luma
 * plane (p = 0, or 1/2 in 4:4:4 plane-coded mode) of an intra macroblock. */
                                           int mb_type, int is_h264,
                                           int simple,
                                           int transform_bypass,
                                           int pixel_shift,
                                           int *block_offset,
                                           int linesize,
                                           uint8_t *dest_y, int p)
{
    MpegEncContext *const s = &h->s;
    void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
    int i;
    int qscale = p == 0 ? s->qscale : h->chroma_qp[p - 1];
    block_offset += 16 * p;   /* select the offset table for plane p */
    if (IS_INTRA4x4(mb_type)) {
        if (simple || !s->encoding) {
            if (IS_8x8DCT(mb_type)) {
                if (transform_bypass) {
                    idct_dc_add =
                    idct_add    = s->dsp.add_pixels8;
                } else {
                    idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                    /* NOTE(review): elided line here (doxygen 1800) —
                     * presumably idct_add = h->h264dsp.h264_idct8_add;
                     * confirm. */
                }
                /* 8x8 transform: four 8x8 sub-blocks. */
                for (i = 0; i < 16; i += 4) {
                    uint8_t *const ptr = dest_y + block_offset[i];
                    const int dir      = h->intra4x4_pred_mode_cache[scan8[i]];
                    if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                        /* Lossless V/H prediction is fused with the add. */
                        h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    } else {
                        const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
                        h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
                                             (h->topright_samples_available << i) & 0x4000, linesize);
                        if (nnz) {
                            /* DC-only blocks take the cheaper add. */
                            if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                            else
                                idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        }
                    }
                }
            } else {
                if (transform_bypass) {
                    idct_dc_add =
                    idct_add    = s->dsp.add_pixels4;
                } else {
                    idct_dc_add = h->h264dsp.h264_idct_dc_add;
                    /* NOTE(review): elided line here (doxygen 1825) —
                     * presumably idct_add = h->h264dsp.h264_idct_add;
                     * confirm. */
                }
                /* 4x4 transform: sixteen 4x4 sub-blocks. */
                for (i = 0; i < 16; i++) {
                    uint8_t *const ptr = dest_y + block_offset[i];
                    const int dir      = h->intra4x4_pred_mode_cache[scan8[i]];

                    if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                        h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    } else {
                        uint8_t *topright;
                        int nnz, tr;
                        uint64_t tr_high;
                        if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                            const int topright_avail = (h->topright_samples_available << i) & 0x8000;
                            assert(s->mb_y || linesize <= block_offset[i]);
                            if (!topright_avail) {
                                /* Replicate the last available top pixel into
                                 * a stack temporary as the top-right row. */
                                if (pixel_shift) {
                                    tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                    topright = (uint8_t *)&tr_high;
                                } else {
                                    tr       = ptr[3 - linesize] * 0x01010101u;
                                    topright = (uint8_t *)&tr;
                                }
                            } else
                                topright = ptr + (4 << pixel_shift) - linesize;
                        } else
                            topright = NULL;

                        h->hpc.pred4x4[dir](ptr, topright, linesize);
                        nnz = h->non_zero_count_cache[scan8[i + p * 16]];
                        if (nnz) {
                            if (is_h264) {
                                if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                    idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                                else
                                    idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                            } else if (CONFIG_SVQ3_DECODER)
                                ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
                        }
                    }
                }
            }
        }
    } else {
        /* Intra 16x16: one full-plane prediction plus separate luma DC. */
        h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
        if (is_h264) {
            /* NOTE(review): elided line here (doxygen 1871) — presumably a
             * non_zero_count check guarding the DC transform; confirm. */
            if (!transform_bypass)
                h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
                                                     h->mb_luma_dc[p],
                                                     h->dequant4_coeff[p][qscale][0]);
            else {
                /* Lossless: scatter the 16 DC values back into the raster
                 * positions of their 4x4 blocks. */
                static const uint8_t dc_mapping[16] = {
                     0 * 16,  1 * 16,  4 * 16,  5 * 16,
                     2 * 16,  3 * 16,  6 * 16,  7 * 16,
                     8 * 16,  9 * 16, 12 * 16, 13 * 16,
                    10 * 16, 11 * 16, 14 * 16, 15 * 16 };
                for (i = 0; i < 16; i++)
                    dctcoef_set(h->mb + (p * 256 << pixel_shift),
                                pixel_shift, dc_mapping[i],
                                dctcoef_get(h->mb_luma_dc[p],
                                            pixel_shift, i));
            }
        }
    } else if (CONFIG_SVQ3_DECODER)
        ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
                                       h->mb_luma_dc[p], qscale);
    }
}
1894 
/* NOTE(review): first signature line elided by extraction — per upstream this
 * is static av_always_inline void hl_decode_mb_idct_luma(H264Context *h,
 * int mb_type, ...); confirm. Adds the inverse-transformed residual for one
 * luma plane of a non-intra4x4 macroblock on top of the prediction already
 * written to dest_y. */
                                        int is_h264, int simple,
                                        int transform_bypass,
                                        int pixel_shift,
                                        int *block_offset,
                                        int linesize,
                                        uint8_t *dest_y, int p)
{
    MpegEncContext *const s = &h->s;
    void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
    int i;
    block_offset += 16 * p;   /* select the offset table for plane p */
    if (!IS_INTRA4x4(mb_type)) {
        if (is_h264) {
            if (IS_INTRA16x16(mb_type)) {
                if (transform_bypass) {
                    if (h->sps.profile_idc == 244 &&
                        /* NOTE(review): condition tail elided (doxygen
                         * 1912-1913) — per upstream, checks for vertical/
                         * horizontal 16x16 prediction modes; confirm. */
                        h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
                                                                      h->mb + (p * 256 << pixel_shift),
                                                                      linesize);
                    } else {
                        for (i = 0; i < 16; i++)
                            if (h->non_zero_count_cache[scan8[i + p * 16]] ||
                                dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                s->dsp.add_pixels4(dest_y + block_offset[i],
                                                   h->mb + (i * 16 + p * 256 << pixel_shift),
                                                   linesize);
                    }
                } else {
                    h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                    h->mb + (p * 256 << pixel_shift),
                                                    linesize,
                                                    h->non_zero_count_cache + p * 5 * 8);
                }
            } else if (h->cbp & 15) {
                /* Inter (or intra-PCM-free) MB with coded luma blocks. */
                if (transform_bypass) {
                    const int di = IS_8x8DCT(mb_type) ? 4 : 1;
                    idct_add = IS_8x8DCT(mb_type) ? s->dsp.add_pixels8
                                                  : s->dsp.add_pixels4;
                    for (i = 0; i < 16; i += di)
                        if (h->non_zero_count_cache[scan8[i + p * 16]])
                            idct_add(dest_y + block_offset[i],
                                     h->mb + (i * 16 + p * 256 << pixel_shift),
                                     linesize);
                } else {
                    if (IS_8x8DCT(mb_type))
                        h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   h->non_zero_count_cache + p * 5 * 8);
                    else
                        h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   h->non_zero_count_cache + p * 5 * 8);
                }
            }
        } else if (CONFIG_SVQ3_DECODER) {
            for (i = 0; i < 16; i++)
                if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
                    // FIXME benchmark weird rule, & below
                    uint8_t *const ptr = dest_y + block_offset[i];
                    ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
                                       s->qscale, IS_INTRA(mb_type) ? 1 : 0);
                }
        }
    }
}
1965 
1966 #define BITS 8
1967 #define SIMPLE 1
1968 #include "h264_mb_template.c"
1969 
1970 #undef BITS
1971 #define BITS 16
1972 #include "h264_mb_template.c"
1973 
1974 #undef SIMPLE
1975 #define SIMPLE 0
1976 #include "h264_mb_template.c"
1977 
/* NOTE(review): signature elided by extraction — per upstream this is
 * void ff_h264_hl_decode_mb(H264Context *h); confirm. Dispatches the
 * per-macroblock reconstruction to the specialized template instantiation
 * matching chroma format, bit depth and complexity. */
{
    MpegEncContext *const s = &h->s;
    const int mb_xy   = h->mb_xy;
    const int mb_type = s->current_picture.f.mb_type[mb_xy];
    /* The "complex" path handles PCM blocks, lossless (qscale == 0) and the
     * size-optimized build; the simple paths are the fast common case. */
    int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) ||
                     s->qscale == 0;

    if (CHROMA444) {
        if (is_complex || h->pixel_shift)
            hl_decode_mb_444_complex(h);
        else
            hl_decode_mb_444_simple_8(h);
    } else if (is_complex) {
        hl_decode_mb_complex(h);
    } else if (h->pixel_shift) {
        hl_decode_mb_simple_16(h);   /* >8-bit content */
    } else
        hl_decode_mb_simple_8(h);
}
1997 
/* NOTE(review): signature elided by extraction — per upstream this is
 * static int pred_weight_table(H264Context *h); confirm. Parses the explicit
 * weighted-prediction table (pred_weight_table()) from the slice header and
 * records whether any non-default weight is in use. Returns 0. */
{
    MpegEncContext *const s = &h->s;
    int list, i;
    int luma_def, chroma_def;

    h->use_weight        = 0;
    h->use_weight_chroma = 0;
    /* NOTE(review): elided line here (doxygen 2006) — presumably reads
     * luma_log2_weight_denom from the bitstream; confirm. */
    if (h->sps.chroma_format_idc)
        /* NOTE(review): elided line here (doxygen 2008) — presumably reads
         * chroma_log2_weight_denom; confirm. */
    luma_def   = 1 << h->luma_log2_weight_denom;
    chroma_def = 1 << h->chroma_log2_weight_denom;

    for (list = 0; list < 2; list++) {
        h->luma_weight_flag[list]   = 0;
        h->chroma_weight_flag[list] = 0;
        for (i = 0; i < h->ref_count[list]; i++) {
            int luma_weight_flag, chroma_weight_flag;

            luma_weight_flag = get_bits1(&s->gb);
            if (luma_weight_flag) {
                h->luma_weight[i][list][0] = get_se_golomb(&s->gb);
                h->luma_weight[i][list][1] = get_se_golomb(&s->gb);
                /* Flag explicit weighting only when it differs from the
                 * default (weight = 1 << denom, offset = 0). */
                if (h->luma_weight[i][list][0] != luma_def ||
                    h->luma_weight[i][list][1] != 0) {
                    h->use_weight             = 1;
                    h->luma_weight_flag[list] = 1;
                }
            } else {
                h->luma_weight[i][list][0] = luma_def;
                h->luma_weight[i][list][1] = 0;
            }

            if (h->sps.chroma_format_idc) {
                chroma_weight_flag = get_bits1(&s->gb);
                if (chroma_weight_flag) {
                    int j;
                    for (j = 0; j < 2; j++) {   /* j: Cb / Cr */
                        h->chroma_weight[i][list][j][0] = get_se_golomb(&s->gb);
                        h->chroma_weight[i][list][j][1] = get_se_golomb(&s->gb);
                        if (h->chroma_weight[i][list][j][0] != chroma_def ||
                            h->chroma_weight[i][list][j][1] != 0) {
                            h->use_weight_chroma        = 1;
                            h->chroma_weight_flag[list] = 1;
                        }
                    }
                } else {
                    int j;
                    for (j = 0; j < 2; j++) {
                        h->chroma_weight[i][list][j][0] = chroma_def;
                        h->chroma_weight[i][list][j][1] = 0;
                    }
                }
            }
        }
        /* NOTE(review): elided condition here (doxygen 2054) — per upstream,
         * only B slices carry a list-1 table; confirm. */
            break;
    }
    h->use_weight = h->use_weight || h->use_weight_chroma;
    return 0;
}
2060 
/*
 * Fill h->implicit_weight[][][] for implicit (POC-distance based) weighted
 * bi-prediction, per H.264 spec 8.4.2.3.3.
 *
 * @param h     the H.264 decoding context
 * @param field  < 0: whole-frame mode; the same weight is stored for both
 *              field halves, and a shortcut disables weighting entirely when
 *              a single L0/L1 ref pair is symmetric around the current POC.
 *              0 or 1: compute weights for that field of an MBAFF pair,
 *              using the extended (offset-by-16) field reference indices.
 */
2066 static void implicit_weight_table(H264Context *h, int field)
2067 {
2068  MpegEncContext *const s = &h->s;
2069  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2070 
2071  for (i = 0; i < 2; i++) {
2072  h->luma_weight_flag[i] = 0;
2073  h->chroma_weight_flag[i] = 0;
2074  }
2075 
2076  if (field < 0) {
2077  if (s->picture_structure == PICT_FRAME) {
2078  cur_poc = s->current_picture_ptr->poc;
2079  } else {
2080  cur_poc = s->current_picture_ptr->field_poc[s->picture_structure - 1];
2081  }
     /* Shortcut: one short-term ref in each list, POCs symmetric around
      * cur_poc -> every implicit weight would be 32/32, so skip weighting
      * altogether (not applicable to MBAFF frames). */
2082  if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF &&
2083  h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
2084  h->use_weight = 0;
2085  h->use_weight_chroma = 0;
2086  return;
2087  }
2088  ref_start = 0;
2089  ref_count0 = h->ref_count[0];
2090  ref_count1 = h->ref_count[1];
2091  } else {
     /* Field mode: iterate the split field refs stored at indices 16.. */
2092  cur_poc = s->current_picture_ptr->field_poc[field];
2093  ref_start = 16;
2094  ref_count0 = 16 + 2 * h->ref_count[0];
2095  ref_count1 = 16 + 2 * h->ref_count[1];
2096  }
2097 
  /* 2 == "implicit" in the use_weight fields; denominators fixed at 2^5. */
2098  h->use_weight = 2;
2099  h->use_weight_chroma = 2;
2100  h->luma_log2_weight_denom = 5;
2101  h->chroma_log2_weight_denom = 5;
2102 
2103  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
2104  int poc0 = h->ref_list[0][ref0].poc;
2105  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
     /* Default weight (32/64) used for long-term refs, td == 0, or an
      * out-of-range distance scale factor. */
2106  int w = 32;
2107  if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
2108  int poc1 = h->ref_list[1][ref1].poc;
2109  int td = av_clip(poc1 - poc0, -128, 127);
2110  if (td) {
2111  int tb = av_clip(cur_poc - poc0, -128, 127);
          /* tx = rounded 16384/td; dist_scale_factor = (tb/td) in .8
           * fixed point, as specified in 8.4.2.3.3. */
2112  int tx = (16384 + (FFABS(td) >> 1)) / td;
2113  int dist_scale_factor = (tb * tx + 32) >> 8;
2114  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
2115  w = 64 - dist_scale_factor;
2116  }
2117  }
2118  if (field < 0) {
2119  h->implicit_weight[ref0][ref1][0] =
2120  h->implicit_weight[ref0][ref1][1] = w;
2121  } else {
2122  h->implicit_weight[ref0][ref1][field] = w;
2123  }
2124  }
2125  }
2126 }
2127 
/*
 * Reset the POC / frame_num prediction state, as required after an IDR slice.
 *
 * NOTE(review): the doc extraction dropped source line 2133 here; upstream
 * h264.c also clears all reference pictures at this point
 * (ff_h264_remove_all_refs) -- confirm against the original source.
 */
2131 static void idr(H264Context *h)
2132 {
2134  h->prev_frame_num = 0;
2135  h->prev_frame_num_offset = 0;
2136  h->prev_poc_msb =
2137  h->prev_poc_lsb = 0;
2138 }
2139 
2140 /* forget old pics after a seek */
2141 static void flush_change(H264Context *h)
2142 {
2143  int i;
2144  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2145  h->last_pocs[i] = INT_MIN;
2146  h->outputed_poc = h->next_outputed_poc = INT_MIN;
2147  h->prev_interlaced_frame = 1;
2148  idr(h);
2149  if (h->s.current_picture_ptr)
2150  h->s.current_picture_ptr->f.reference = 0;
2151  h->s.first_field = 0;
2152  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
2153  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
2154  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
2155  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
2156  ff_h264_reset_sei(h);
2157 }
2158 
2159 /* forget old pics after a seek */
2160 static void flush_dpb(AVCodecContext *avctx)
2161 {
2162  H264Context *h = avctx->priv_data;
2163  int i;
2164 
2165  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
2166  if (h->delayed_pic[i])
2167  h->delayed_pic[i]->f.reference = 0;
2168  h->delayed_pic[i] = NULL;
2169  }
2170 
2171  flush_change(h);
2172  ff_mpeg_flush(avctx);
2173 }
2174 
/*
 * Derive the picture order count (POC) of the current picture from the
 * slice-header syntax, per H.264 spec 8.2.1, for all three poc_type modes.
 * Stores the per-field POCs and their minimum into the current Picture.
 *
 * NOTE(review): the doc extraction dropped source lines 2182, 2244 and 2246
 * in this function; upstream line 2182 initializes h->frame_num_offset from
 * h->prev_frame_num_offset, and 2244/2246 guard the field_poc stores by
 * picture structure -- confirm against the original source.
 *
 * @return 0 (no failure path in the visible code)
 */
2175 static int init_poc(H264Context *h)
2176 {
2177  MpegEncContext *const s = &h->s;
2178  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
2179  int field_poc[2];
2180  Picture *cur = s->current_picture_ptr;
2181 
  /* frame_num wrapped -> advance the offset by one wrap period. */
2183  if (h->frame_num < h->prev_frame_num)
2184  h->frame_num_offset += max_frame_num;
2185 
2186  if (h->sps.poc_type == 0) {
  /* Type 0: explicit poc_lsb in the bitstream; reconstruct poc_msb by
   * detecting lsb wrap-around relative to the previous picture (8.2.1.1). */
2187  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
2188 
2189  if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
2190  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
2191  else if (h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
2192  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
2193  else
2194  h->poc_msb = h->prev_poc_msb;
2195  field_poc[0] =
2196  field_poc[1] = h->poc_msb + h->poc_lsb;
2197  if (s->picture_structure == PICT_FRAME)
2198  field_poc[1] += h->delta_poc_bottom;
2199  } else if (h->sps.poc_type == 1) {
  /* Type 1: POC derived from frame_num and the SPS offset cycle (8.2.1.2). */
2200  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
2201  int i;
2202 
2203  if (h->sps.poc_cycle_length != 0)
2204  abs_frame_num = h->frame_num_offset + h->frame_num;
2205  else
2206  abs_frame_num = 0;
2207 
2208  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
2209  abs_frame_num--;
2210 
2211  expected_delta_per_poc_cycle = 0;
2212  for (i = 0; i < h->sps.poc_cycle_length; i++)
2213  // FIXME integrate during sps parse
2214  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
2215 
2216  if (abs_frame_num > 0) {
2217  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
2218  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
2219 
2220  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
2221  for (i = 0; i <= frame_num_in_poc_cycle; i++)
2222  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
2223  } else
2224  expectedpoc = 0;
2225 
2226  if (h->nal_ref_idc == 0)
2227  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
2228 
2229  field_poc[0] = expectedpoc + h->delta_poc[0];
2230  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
2231 
2232  if (s->picture_structure == PICT_FRAME)
2233  field_poc[1] += h->delta_poc[1];
2234  } else {
  /* Type 2: POC follows decoding order; non-ref pictures sit one below
   * the following reference picture (8.2.1.3). */
2235  int poc = 2 * (h->frame_num_offset + h->frame_num);
2236 
2237  if (!h->nal_ref_idc)
2238  poc--;
2239 
2240  field_poc[0] = poc;
2241  field_poc[1] = poc;
2242  }
2243 
2245  s->current_picture_ptr->field_poc[0] = field_poc[0];
2247  s->current_picture_ptr->field_poc[1] = field_poc[1];
2248  cur->poc = FFMIN(cur->field_poc[0], cur->field_poc[1]);
2249 
2250  return 0;
2251 }
2252 
2257 {
2258  int i;
2259  for (i = 0; i < 16; i++) {
2260 #define T(x) (x >> 2) | ((x << 2) & 0xF)
2261  h->zigzag_scan[i] = T(zigzag_scan[i]);
2262  h->field_scan[i] = T(field_scan[i]);
2263 #undef T
2264  }
2265  for (i = 0; i < 64; i++) {
2266 #define T(x) (x >> 3) | ((x & 7) << 3)
2267  h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
2269  h->field_scan8x8[i] = T(field_scan8x8[i]);
2271 #undef T
2272  }
2273  if (h->sps.transform_bypass) { // FIXME same ugly
2280  } else {
2281  h->zigzag_scan_q0 = h->zigzag_scan;
2284  h->field_scan_q0 = h->field_scan;
2287  }
2288 }
2289 
/*
 * Finish decoding of the current field (or frame): commit POC / frame_num
 * prediction state, run deblocking/error concealment hooks, and close the
 * MPV frame.
 *
 * NOTE(review): the doc extraction dropped several source lines in this
 * function (2298-2304, 2307, 2311, 2313, 2322-2324); upstream these report
 * decode progress to frame threads, save prev_frame_num_offset, and run the
 * per-frame deblock/VDPAU finalization -- confirm against the original
 * source before relying on this listing.
 *
 * @param h        the H.264 decoding context
 * @param in_setup nonzero when called while setting up the next field,
 *                 before frame threads would normally commit state
 * @return err (0 in the visible code)
 */
2290 static int field_end(H264Context *h, int in_setup)
2291 {
2292  MpegEncContext *const s = &h->s;
2293  AVCodecContext *const avctx = s->avctx;
2294  int err = 0;
2295  s->mb_y = 0;
2296 
2297  if (!in_setup && !s->droppable)
2300 
2304 
  /* Only commit prediction state when not running ahead of a frame thread. */
2305  if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
2306  if (!s->droppable) {
2308  h->prev_poc_msb = h->poc_msb;
2309  h->prev_poc_lsb = h->poc_lsb;
2310  }
2312  h->prev_frame_num = h->frame_num;
2314  }
2315 
2316  if (avctx->hwaccel) {
2317  if (avctx->hwaccel->end_frame(avctx) < 0)
2318  av_log(avctx, AV_LOG_ERROR,
2319  "hardware accelerator failed to decode picture\n");
2320  }
2321 
2325 
2326  /*
2327  * FIXME: Error handling code does not seem to support interlaced
2328  * when slices span multiple rows
2329  * The ff_er_add_slice calls don't work right for bottom
2330  * fields; they cause massive erroneous error concealing
2331  * Error marking covers both fields (top and bottom).
2332  * This causes a mismatched s->error_count
2333  * and a bad error table. Further, the error count goes to
2334  * INT_MAX when called for bottom field, because mb_y is
2335  * past end by one (callers fault) and resync_mb_y != 0
2336  * causes problems for the first MB line, too.
2337  */
2338  if (!FIELD_PICTURE)
2339  ff_er_frame_end(s);
2340 
2341  ff_MPV_frame_end(s);
2342 
2343  h->current_slice = 0;
2344 
2345  return err;
2346 }
2347 
/*
 * Replicate slice-invariant decoder state from the master context (src)
 * into a per-slice-thread context (dst) for slice-parallel decoding.
 * Picture pointers, line sizes, reference lists and dequant tables are
 * copied shallowly; only the edge emulation scratch buffer is allocated
 * per context.
 *
 * NOTE(review): the doc extraction dropped source lines 2356 and 2371;
 * upstream these copy s.current_picture_ptr and prev_frame_num_offset as
 * well -- confirm against the original source.
 *
 * @return 0 on success, a negative error code if scratch buffer
 *         allocation fails
 */
2351 static int clone_slice(H264Context *dst, H264Context *src)
2352 {
2353  int ret;
2354 
2355  memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
2357  dst->s.current_picture = src->s.current_picture;
2358  dst->s.linesize = src->s.linesize;
2359  dst->s.uvlinesize = src->s.uvlinesize;
2360  dst->s.first_field = src->s.first_field;
2361 
  /* Each slice context needs its own edge-emulation scratch buffer. */
2362  if (!dst->s.edge_emu_buffer &&
2363  (ret = ff_mpv_frame_size_alloc(&dst->s, dst->s.linesize))) {
2364  av_log(dst->s.avctx, AV_LOG_ERROR,
2365  "Failed to allocate scratch buffers\n");
2366  return ret;
2367  }
2368 
2369  dst->prev_poc_msb = src->prev_poc_msb;
2370  dst->prev_poc_lsb = src->prev_poc_lsb;
2372  dst->prev_frame_num = src->prev_frame_num;
2373  dst->short_ref_count = src->short_ref_count;
2374 
2375  memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
2376  memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
2377  memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
2378  memcpy(dst->ref_list, src->ref_list, sizeof(dst->ref_list));
2379 
2380  memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
2381  memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
2382 
2383  return 0;
2384 }
2385 
2394 {
2395  int profile = sps->profile_idc;
2396 
2397  switch (sps->profile_idc) {
2399  // constraint_set1_flag set to 1
2400  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
2401  break;
2405  // constraint_set3_flag set to 1
2406  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
2407  break;
2408  }
2409 
2410  return profile;
2411 }
2412 
2414 {
2415  MpegEncContext *s = &h->s;
2416 
2417  if (s->flags & CODEC_FLAG_LOW_DELAY ||
2419  !h->sps.num_reorder_frames)) {
2420  if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
2421  av_log(h->s.avctx, AV_LOG_WARNING, "Delayed frames seen. "
2422  "Reenabling low delay requires a codec flush.\n");
2423  else
2424  s->low_delay = 1;
2425  }
2426 
2427  if (s->avctx->has_b_frames < 2)
2428  s->avctx->has_b_frames = !s->low_delay;
2429 
2430  if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
2432  "Different bit depth between chroma and luma", 1);
2433  return AVERROR_PATCHWELCOME;
2434  }
2435 
2436  if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
2438  if (s->avctx->codec &&
2440  (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
2442  "VDPAU decoding does not support video colorspace.\n");
2443  return AVERROR_INVALIDDATA;
2444  }
2445  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
2448  h->pixel_shift = h->sps.bit_depth_luma > 8;
2449 
2451  h->sps.chroma_format_idc);
2453  h->sps.chroma_format_idc);
2454  s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
2455  ff_dsputil_init(&s->dsp, s->avctx);
2457  } else {
2458  av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
2459  h->sps.bit_depth_luma);
2460  return AVERROR_INVALIDDATA;
2461  }
2462  }
2463  return 0;
2464 }
2465 
2467 {
2468  MpegEncContext *const s = &h->s;
2469  switch (h->sps.bit_depth_luma) {
2470  case 9:
2471  if (CHROMA444) {
2472  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
2473  return AV_PIX_FMT_GBRP9;
2474  } else
2475  return AV_PIX_FMT_YUV444P9;
2476  } else if (CHROMA422)
2477  return AV_PIX_FMT_YUV422P9;
2478  else
2479  return AV_PIX_FMT_YUV420P9;
2480  break;
2481  case 10:
2482  if (CHROMA444) {
2483  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
2484  return AV_PIX_FMT_GBRP10;
2485  } else
2486  return AV_PIX_FMT_YUV444P10;
2487  } else if (CHROMA422)
2488  return AV_PIX_FMT_YUV422P10;
2489  else
2490  return AV_PIX_FMT_YUV420P10;
2491  break;
2492  case 8:
2493  if (CHROMA444) {
2494  if (s->avctx->colorspace == AVCOL_SPC_RGB) {
2495  return AV_PIX_FMT_GBRP;
2496  } else
2499  } else if (CHROMA422) {
2502  } else {
2503  return s->avctx->get_format(s->avctx, s->avctx->codec->pix_fmts ?
2504  s->avctx->codec->pix_fmts :
2508  }
2509  break;
2510  default:
2512  "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
2513  return AVERROR_INVALIDDATA;
2514  }
2515 }
2516 
/*
 * (Re-)initialize the decoder for the dimensions/parameters of the current
 * SPS: MPV context (re)init, scan tables, per-context tables, and the
 * per-slice thread contexts.
 *
 * NOTE(review): the doc extraction dropped source lines 2522, 2524, 2530,
 * 2534 and 2558 in this function (upstream: setting avctx dimensions,
 * reducing the SPS time base via av_reduce, and the HAVE_THREADS /
 * FF_THREAD_SLICE guard around the single-context branch) -- confirm
 * against the original source.
 *
 * @param h      the (master) H.264 decoding context
 * @param reinit nonzero to resize an already-initialized context instead of
 *               doing a full ff_MPV_common_init()
 * @return 0 on success, a negative error code (or -1) on failure
 */
2517 static int h264_slice_header_init(H264Context *h, int reinit)
2518 {
2519  MpegEncContext *const s = &h->s;
2520  int i, ret;
2521 
2523  s->avctx->sample_aspect_ratio = h->sps.sar;
2525 
2526  if (h->sps.timing_info_present_flag) {
2527  int64_t den = h->sps.time_scale;
  /* Old x264 (< build 44) wrote half the correct time_scale. */
2528  if (h->x264_build < 44U)
2529  den *= 2;
2531  h->sps.num_units_in_tick, den, 1 << 30);
2532  }
2533 
2535 
2536  if (reinit) {
2537  free_tables(h, 0);
2538  if ((ret = ff_MPV_common_frame_size_change(s)) < 0) {
2539  av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_frame_size_change() failed.\n");
2540  return ret;
2541  }
2542  } else {
  /* NOTE(review): operator-precedence bug -- this assigns the *comparison*
   * result to ret, so on failure ret == 1 and a positive value is returned
   * instead of the error code. Should read
   * ((ret = ff_MPV_common_init(s)) < 0). */
2543  if ((ret = ff_MPV_common_init(s) < 0)) {
2544  av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
2545  return ret;
2546  }
2547  }
2548  s->first_field = 0;
2549  h->prev_interlaced_frame = 1;
2550 
2551  init_scan_tables(h);
2552  if (ff_h264_alloc_tables(h) < 0) {
2553  av_log(h->s.avctx, AV_LOG_ERROR,
2554  "Could not allocate memory for h264\n");
2555  return AVERROR(ENOMEM);
2556  }
2557 
2559  if (context_init(h) < 0) {
2560  av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
2561  return -1;
2562  }
2563  } else {
  /* Slice threading: clone the master context into one H264Context per
   * slice thread (context 0 is h itself). */
2564  for (i = 1; i < s->slice_context_count; i++) {
2565  H264Context *c;
  /* NOTE(review): av_malloc() result is used without a NULL check. */
2566  c = h->thread_context[i] = av_malloc(sizeof(H264Context));
2567  memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
2568  memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
2569  c->h264dsp = h->h264dsp;
2570  c->sps = h->sps;
2571  c->pps = h->pps;
2572  c->pixel_shift = h->pixel_shift;
2573  init_scan_tables(c);
2574  clone_tables(c, h, i);
2575  }
2576 
2577  for (i = 0; i < s->slice_context_count; i++)
2578  if (context_init(h->thread_context[i]) < 0) {
2579  av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
2580  return -1;
2581  }
2582  }
2583 
2584  return 0;
2585 }
2586 
2598 {
2599  MpegEncContext *const s = &h->s;
2600  MpegEncContext *const s0 = &h0->s;
2601  unsigned int first_mb_in_slice;
2602  unsigned int pps_id;
2603  int num_ref_idx_active_override_flag, max_refs, ret;
2604  unsigned int slice_type, tmp, i, j;
2605  int default_ref_list_done = 0;
2606  int last_pic_structure, last_pic_droppable;
2607  int needs_reinit = 0;
2608 
2609  /* FIXME: 2tap qpel isn't implemented for high bit depth. */
2610  if ((s->avctx->flags2 & CODEC_FLAG2_FAST) &&
2611  !h->nal_ref_idc && !h->pixel_shift) {
2614  } else {
2617  }
2618 
2619  first_mb_in_slice = get_ue_golomb(&s->gb);
2620 
2621  if (first_mb_in_slice == 0) { // FIXME better field boundary detection
2622  if (h0->current_slice && FIELD_PICTURE) {
2623  field_end(h, 1);
2624  }
2625 
2626  h0->current_slice = 0;
2627  if (!s0->first_field) {
2628  if (s->current_picture_ptr && !s->droppable &&
2629  s->current_picture_ptr->owner2 == s) {
2632  }
2634  }
2635  }
2636 
2637  slice_type = get_ue_golomb_31(&s->gb);
2638  if (slice_type > 9) {
2639  av_log(h->s.avctx, AV_LOG_ERROR,
2640  "slice type too large (%d) at %d %d\n",
2641  h->slice_type, s->mb_x, s->mb_y);
2642  return -1;
2643  }
2644  if (slice_type > 4) {
2645  slice_type -= 5;
2646  h->slice_type_fixed = 1;
2647  } else
2648  h->slice_type_fixed = 0;
2649 
2650  slice_type = golomb_to_pict_type[slice_type];
2651  if (slice_type == AV_PICTURE_TYPE_I ||
2652  (h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
2653  default_ref_list_done = 1;
2654  }
2655  h->slice_type = slice_type;
2656  h->slice_type_nos = slice_type & 3;
2657 
2658  if (h->nal_unit_type == NAL_IDR_SLICE &&
2660  av_log(h->s.avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
2661  return AVERROR_INVALIDDATA;
2662  }
2663 
2664  // to make a few old functions happy, it's wrong though
2665  s->pict_type = h->slice_type;
2666 
2667  pps_id = get_ue_golomb(&s->gb);
2668  if (pps_id >= MAX_PPS_COUNT) {
2669  av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
2670  return -1;
2671  }
2672  if (!h0->pps_buffers[pps_id]) {
2673  av_log(h->s.avctx, AV_LOG_ERROR,
2674  "non-existing PPS %u referenced\n",
2675  pps_id);
2676  return -1;
2677  }
2678  h->pps = *h0->pps_buffers[pps_id];
2679 
2680  if (!h0->sps_buffers[h->pps.sps_id]) {
2681  av_log(h->s.avctx, AV_LOG_ERROR,
2682  "non-existing SPS %u referenced\n",
2683  h->pps.sps_id);
2684  return -1;
2685  }
2686 
2687  if (h->pps.sps_id != h->current_sps_id ||
2688  h->context_reinitialized ||
2689  h0->sps_buffers[h->pps.sps_id]->new) {
2690  SPS *new_sps = h0->sps_buffers[h->pps.sps_id];
2691 
2692  h0->sps_buffers[h->pps.sps_id]->new = 0;
2693 
2694  if (h->sps.chroma_format_idc != new_sps->chroma_format_idc ||
2695  h->sps.bit_depth_luma != new_sps->bit_depth_luma)
2696  needs_reinit = 1;
2697 
2698  h->current_sps_id = h->pps.sps_id;
2699  h->sps = *h0->sps_buffers[h->pps.sps_id];
2700 
2701  if ((ret = h264_set_parameter_from_sps(h)) < 0)
2702  return ret;
2703  }
2704 
2705  s->avctx->profile = ff_h264_get_profile(&h->sps);
2706  s->avctx->level = h->sps.level_idc;
2707  s->avctx->refs = h->sps.ref_frame_count;
2708 
2709  if (s->mb_width != h->sps.mb_width ||
2710  s->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
2711  needs_reinit = 1;
2712 
2713  s->mb_width = h->sps.mb_width;
2714  s->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
2715 
2716  h->b_stride = s->mb_width * 4;
2717 
2718  s->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
2719 
2720  s->width = 16 * s->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
2721  if (h->sps.frame_mbs_only_flag)
2722  s->height = 16 * s->mb_height - (1 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1);
2723  else
2724  s->height = 16 * s->mb_height - (2 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1);
2725 
2726  if (FFALIGN(s->avctx->width, 16) == s->width &&
2727  FFALIGN(s->avctx->height, 16) == s->height) {
2728  s->width = s->avctx->width;
2729  s->height = s->avctx->height;
2730  }
2731 
2734  : AVCOL_RANGE_MPEG;
2736  if (s->avctx->colorspace != h->sps.colorspace)
2737  needs_reinit = 1;
2739  s->avctx->color_trc = h->sps.color_trc;
2740  s->avctx->colorspace = h->sps.colorspace;
2741  }
2742  }
2743 
2744  if (s->context_initialized &&
2745  (s->width != s->avctx->width ||
2746  s->height != s->avctx->height ||
2747  needs_reinit ||
2749 
2750  if (h != h0) {
2751  av_log(s->avctx, AV_LOG_ERROR, "changing width/height on "
2752  "slice %d\n", h0->current_slice + 1);
2753  return AVERROR_INVALIDDATA;
2754  }
2755 
2756  flush_change(h);
2757 
2758  if ((ret = get_pixel_format(h)) < 0)
2759  return ret;
2760  s->avctx->pix_fmt = ret;
2761 
2762  av_log(h->s.avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
2763  "pix_fmt: %d\n", s->width, s->height, s->avctx->pix_fmt);
2764 
2765  if ((ret = h264_slice_header_init(h, 1)) < 0) {
2766  av_log(h->s.avctx, AV_LOG_ERROR,
2767  "h264_slice_header_init() failed\n");
2768  return ret;
2769  }
2770  h->context_reinitialized = 1;
2771  }
2772  if (!s->context_initialized) {
2773  if (h != h0) {
2774  av_log(h->s.avctx, AV_LOG_ERROR,
2775  "Cannot (re-)initialize context during parallel decoding.\n");
2776  return -1;
2777  }
2778 
2779  if ((ret = get_pixel_format(h)) < 0)
2780  return ret;
2781  s->avctx->pix_fmt = ret;
2782 
2783  if ((ret = h264_slice_header_init(h, 0)) < 0) {
2784  av_log(h->s.avctx, AV_LOG_ERROR,
2785  "h264_slice_header_init() failed\n");
2786  return ret;
2787  }
2788  }
2789 
2790  if (h == h0 && h->dequant_coeff_pps != pps_id) {
2791  h->dequant_coeff_pps = pps_id;
2793  }
2794 
2795  h->frame_num = get_bits(&s->gb, h->sps.log2_max_frame_num);
2796 
2797  h->mb_mbaff = 0;
2798  h->mb_aff_frame = 0;
2799  last_pic_structure = s0->picture_structure;
2800  last_pic_droppable = s0->droppable;
2801  s->droppable = h->nal_ref_idc == 0;
2802  if (h->sps.frame_mbs_only_flag) {
2804  } else {
2805  if (get_bits1(&s->gb)) { // field_pic_flag
2806  s->picture_structure = PICT_TOP_FIELD + get_bits1(&s->gb); // bottom_field_flag
2807  } else {
2809  h->mb_aff_frame = h->sps.mb_aff;
2810  }
2811  }
2813 
2814  if (h0->current_slice != 0) {
2815  if (last_pic_structure != s->picture_structure ||
2816  last_pic_droppable != s->droppable) {
2817  av_log(h->s.avctx, AV_LOG_ERROR,
2818  "Changing field mode (%d -> %d) between slices is not allowed\n",
2819  last_pic_structure, s->picture_structure);
2820  s->picture_structure = last_pic_structure;
2821  s->droppable = last_pic_droppable;
2822  return AVERROR_INVALIDDATA;
2823  } else if (!s0->current_picture_ptr) {
2825  "unset current_picture_ptr on %d. slice\n",
2826  h0->current_slice + 1);
2827  return AVERROR_INVALIDDATA;
2828  }
2829  } else {
2830  /* Shorten frame num gaps so we don't have to allocate reference
2831  * frames just to throw them away */
2832  if (h->frame_num != h->prev_frame_num) {
2833  int unwrap_prev_frame_num = h->prev_frame_num;
2834  int max_frame_num = 1 << h->sps.log2_max_frame_num;
2835 
2836  if (unwrap_prev_frame_num > h->frame_num)
2837  unwrap_prev_frame_num -= max_frame_num;
2838 
2839  if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
2840  unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
2841  if (unwrap_prev_frame_num < 0)
2842  unwrap_prev_frame_num += max_frame_num;
2843 
2844  h->prev_frame_num = unwrap_prev_frame_num;
2845  }
2846  }
2847 
2848  /* See if we have a decoded first field looking for a pair...
2849  * Here, we're using that to see if we should mark previously
2850  * decode frames as "finished".
2851  * We have to do that before the "dummy" in-between frame allocation,
2852  * since that can modify s->current_picture_ptr. */
2853  if (s0->first_field) {
2854  assert(s0->current_picture_ptr);
2855  assert(s0->current_picture_ptr->f.data[0]);
2857 
2858  /* Mark old field/frame as completed */
2859  if (!last_pic_droppable && s0->current_picture_ptr->owner2 == s0) {
2861  last_pic_structure == PICT_BOTTOM_FIELD);
2862  }
2863 
2864  /* figure out if we have a complementary field pair */
2865  if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
2866  /* Previous field is unmatched. Don't display it, but let it
2867  * remain for reference if marked as such. */
2868  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
2870  last_pic_structure == PICT_TOP_FIELD);
2871  }
2872  } else {
2873  if (s0->current_picture_ptr->frame_num != h->frame_num) {
2874  /* This and previous field were reference, but had
2875  * different frame_nums. Consider this field first in
2876  * pair. Throw away previous field except for reference
2877  * purposes. */
2878  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
2880  last_pic_structure == PICT_TOP_FIELD);
2881  }
2882  } else {
2883  /* Second field in complementary pair */
2884  if (!((last_pic_structure == PICT_TOP_FIELD &&
2886  (last_pic_structure == PICT_BOTTOM_FIELD &&
2889  "Invalid field mode combination %d/%d\n",
2890  last_pic_structure, s->picture_structure);
2891  s->picture_structure = last_pic_structure;
2892  s->droppable = last_pic_droppable;
2893  return AVERROR_INVALIDDATA;
2894  } else if (last_pic_droppable != s->droppable) {
2896  "Cannot combine reference and non-reference fields in the same frame\n");
2898  s->picture_structure = last_pic_structure;
2899  s->droppable = last_pic_droppable;
2900  return AVERROR_PATCHWELCOME;
2901  }
2902 
2903  /* Take ownership of this buffer. Note that if another thread owned
2904  * the first field of this buffer, we're not operating on that pointer,
2905  * so the original thread is still responsible for reporting progress
2906  * on that first field (or if that was us, we just did that above).
2907  * By taking ownership, we assign responsibility to ourselves to
2908  * report progress on the second field. */
2909  s0->current_picture_ptr->owner2 = s0;
2910  }
2911  }
2912  }
2913 
2914  while (h->frame_num != h->prev_frame_num &&
2915  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
2916  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
2917  av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
2918  h->frame_num, h->prev_frame_num);
2919  if (ff_h264_frame_start(h) < 0)
2920  return -1;
2921  h->prev_frame_num++;
2922  h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
2926  if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
2928  return ret;
2929  if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
2931  return AVERROR_INVALIDDATA;
2932  /* Error concealment: if a ref is missing, copy the previous ref in its place.
2933  * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions
2934  * about there being no actual duplicates.
2935  * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're
2936  * concealing a lost frame, this probably isn't noticeable by comparison, but it should
2937  * be fixed. */
2938  if (h->short_ref_count) {
2939  if (prev) {
2940  av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
2941  (const uint8_t **)prev->f.data, prev->f.linesize,
2942  s->avctx->pix_fmt, s->mb_width * 16, s->mb_height * 16);
2943  h->short_ref[0]->poc = prev->poc + 2;
2944  }
2945  h->short_ref[0]->frame_num = h->prev_frame_num;
2946  }
2947  }
2948 
2949  /* See if we have a decoded first field looking for a pair...
2950  * We're using that to see whether to continue decoding in that
2951  * frame, or to allocate a new one. */
2952  if (s0->first_field) {
2953  assert(s0->current_picture_ptr);
2954  assert(s0->current_picture_ptr->f.data[0]);
2956 
2957  /* figure out if we have a complementary field pair */
2958  if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
2959  /* Previous field is unmatched. Don't display it, but let it
2960  * remain for reference if marked as such. */
2961  s0->current_picture_ptr = NULL;
2962  s0->first_field = FIELD_PICTURE;
2963  } else {
2964  if (s0->current_picture_ptr->frame_num != h->frame_num) {
2965  /* This and the previous field had different frame_nums.
2966  * Consider this field first in pair. Throw away previous
2967  * one except for reference purposes. */
2968  s0->first_field = 1;
2969  s0->current_picture_ptr = NULL;
2970  } else {
2971  /* Second field in complementary pair */
2972  s0->first_field = 0;
2973  }
2974  }
2975  } else {
2976  /* Frame or first field in a potentially complementary pair */
2977  s0->first_field = FIELD_PICTURE;
2978  }
2979 
2980  if (!FIELD_PICTURE || s0->first_field) {
2981  if (ff_h264_frame_start(h) < 0) {
2982  s0->first_field = 0;
2983  return -1;
2984  }
2985  } else {
2987  }
2988  }
2989  if (h != h0 && (ret = clone_slice(h, h0)) < 0)
2990  return ret;
2991 
2992  s->current_picture_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
2993 
2994  assert(s->mb_num == s->mb_width * s->mb_height);
2995  if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num ||
2996  first_mb_in_slice >= s->mb_num) {
2997  av_log(h->s.avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
2998  return -1;
2999  }
3000  s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width;
3001  s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE;
3003  s->resync_mb_y = s->mb_y = s->mb_y + 1;
3004  assert(s->mb_y < s->mb_height);
3005 
3006  if (s->picture_structure == PICT_FRAME) {
3007  h->curr_pic_num = h->frame_num;
3008  h->max_pic_num = 1 << h->sps.log2_max_frame_num;
3009  } else {
3010  h->curr_pic_num = 2 * h->frame_num + 1;
3011  h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
3012  }
3013 
3014  if (h->nal_unit_type == NAL_IDR_SLICE)
3015  get_ue_golomb(&s->gb); /* idr_pic_id */
3016 
3017  if (h->sps.poc_type == 0) {
3018  h->poc_lsb = get_bits(&s->gb, h->sps.log2_max_poc_lsb);
3019 
3020  if (h->pps.pic_order_present == 1 && s->picture_structure == PICT_FRAME)
3021  h->delta_poc_bottom = get_se_golomb(&s->gb);
3022  }
3023 
3024  if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
3025  h->delta_poc[0] = get_se_golomb(&s->gb);
3026 
3027  if (h->pps.pic_order_present == 1 && s->picture_structure == PICT_FRAME)
3028  h->delta_poc[1] = get_se_golomb(&s->gb);
3029  }
3030 
3031  init_poc(h);
3032 
3035 
3036  // set defaults, might be overridden a few lines later
3037  h->ref_count[0] = h->pps.ref_count[0];
3038  h->ref_count[1] = h->pps.ref_count[1];
3039 
3040  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3043  num_ref_idx_active_override_flag = get_bits1(&s->gb);
3044 
3045  if (num_ref_idx_active_override_flag) {
3046  h->ref_count[0] = get_ue_golomb(&s->gb) + 1;
3047  if (h->ref_count[0] < 1)
3048  return AVERROR_INVALIDDATA;
3049  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3050  h->ref_count[1] = get_ue_golomb(&s->gb) + 1;
3051  if (h->ref_count[1] < 1)
3052  return AVERROR_INVALIDDATA;
3053  }
3054  }
3055 
3057  h->list_count = 2;
3058  else
3059  h->list_count = 1;
3060  } else {
3061  h->list_count = 0;
3062  h->ref_count[0] = h->ref_count[1] = 0;
3063  }
3064 
3065 
3066  max_refs = s->picture_structure == PICT_FRAME ? 16 : 32;
3067 
3068  if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) {
3069  av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
3070  h->ref_count[0] = h->ref_count[1] = 0;
3071  return AVERROR_INVALIDDATA;
3072  }
3073 
3074  if (!default_ref_list_done)
3076 
3077  if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
3079  h->ref_count[1] = h->ref_count[0] = 0;
3080  return -1;
3081  }
3082 
3083  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3084  s->last_picture_ptr = &h->ref_list[0][0];
3085  s->last_picture_ptr->owner2 = s;
3087  }
3088  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3089  s->next_picture_ptr = &h->ref_list[1][0];
3090  s->next_picture_ptr->owner2 = s;
3092  }
3093 
3094  if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
3095  (h->pps.weighted_bipred_idc == 1 &&
3097  pred_weight_table(h);
3098  else if (h->pps.weighted_bipred_idc == 2 &&
3100  implicit_weight_table(h, -1);
3101  } else {
3102  h->use_weight = 0;
3103  for (i = 0; i < 2; i++) {
3104  h->luma_weight_flag[i] = 0;
3105  h->chroma_weight_flag[i] = 0;
3106  }
3107  }
3108 
3109  // If frame-mt is enabled, only update mmco tables for the first slice
3110  // in a field. Subsequent slices can temporarily clobber h->mmco_index
3111  // or h->mmco, which will cause ref list mix-ups and decoding errors
3112  // further down the line. This may break decoding if the first slice is
3113  // corrupt, thus we only do this if frame-mt is enabled.
3114  if (h->nal_ref_idc &&
3117  h0->current_slice == 0) < 0 &&
3119  return AVERROR_INVALIDDATA;
3120 
3121  if (FRAME_MBAFF) {
3123 
3125  implicit_weight_table(h, 0);
3126  implicit_weight_table(h, 1);
3127  }
3128  }
3129 
3133 
3134  if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
3135  tmp = get_ue_golomb_31(&s->gb);
3136  if (tmp > 2) {
3137  av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
3138  return -1;
3139  }
3140  h->cabac_init_idc = tmp;
3141  }
3142 
3143  h->last_qscale_diff = 0;
3144  tmp = h->pps.init_qp + get_se_golomb(&s->gb);
3145  if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
3146  av_log(s->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
3147  return -1;
3148  }
3149  s->qscale = tmp;
3150  h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
3151  h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
3152  // FIXME qscale / qp ... stuff
3153  if (h->slice_type == AV_PICTURE_TYPE_SP)
3154  get_bits1(&s->gb); /* sp_for_switch_flag */
3155  if (h->slice_type == AV_PICTURE_TYPE_SP ||
3157  get_se_golomb(&s->gb); /* slice_qs_delta */
3158 
3159  h->deblocking_filter = 1;
3160  h->slice_alpha_c0_offset = 52;
3161  h->slice_beta_offset = 52;
3163  tmp = get_ue_golomb_31(&s->gb);
3164  if (tmp > 2) {
3166  "deblocking_filter_idc %u out of range\n", tmp);
3167  return -1;
3168  }
3169  h->deblocking_filter = tmp;
3170  if (h->deblocking_filter < 2)
3171  h->deblocking_filter ^= 1; // 1<->0
3172 
3173  if (h->deblocking_filter) {
3174  h->slice_alpha_c0_offset += get_se_golomb(&s->gb) << 1;
3175  h->slice_beta_offset += get_se_golomb(&s->gb) << 1;
3176  if (h->slice_alpha_c0_offset > 104U ||
3177  h->slice_beta_offset > 104U) {
3179  "deblocking filter parameters %d %d out of range\n",
3181  return -1;
3182  }
3183  }
3184  }
3185 
3186  if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
3192  h->nal_ref_idc == 0))
3193  h->deblocking_filter = 0;
3194 
3195  if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
3196  if (s->avctx->flags2 & CODEC_FLAG2_FAST) {
3197  /* Cheat slightly for speed:
3198  * Do not bother to deblock across slices. */
3199  h->deblocking_filter = 2;
3200  } else {
3201  h0->max_contexts = 1;
3202  if (!h0->single_decode_warning) {
3203  av_log(s->avctx, AV_LOG_INFO,
3204  "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3205  h0->single_decode_warning = 1;
3206  }
3207  if (h != h0) {
3208  av_log(h->s.avctx, AV_LOG_ERROR,
3209  "Deblocking switched inside frame.\n");
3210  return 1;
3211  }
3212  }
3213  }
3214  h->qp_thresh = 15 + 52 -
3216  FFMAX3(0,
3218  h->pps.chroma_qp_index_offset[1]) +
3219  6 * (h->sps.bit_depth_luma - 8);
3220 
3221  h0->last_slice_type = slice_type;
3222  h->slice_num = ++h0->current_slice;
3223  if (h->slice_num >= MAX_SLICES) {
3225  "Too many slices, increase MAX_SLICES and recompile\n");
3226  }
3227 
3228  for (j = 0; j < 2; j++) {
3229  int id_list[16];
3230  int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
3231  for (i = 0; i < 16; i++) {
3232  id_list[i] = 60;
3233  if (h->ref_list[j][i].f.data[0]) {
3234  int k;
3235  uint8_t *base = h->ref_list[j][i].f.base[0];
3236  for (k = 0; k < h->short_ref_count; k++)
3237  if (h->short_ref[k]->f.base[0] == base) {
3238  id_list[i] = k;
3239  break;
3240  }
3241  for (k = 0; k < h->long_ref_count; k++)
3242  if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
3243  id_list[i] = h->short_ref_count + k;
3244  break;
3245  }
3246  }
3247  }
3248 
3249  ref2frm[0] =
3250  ref2frm[1] = -1;
3251  for (i = 0; i < 16; i++)
3252  ref2frm[i + 2] = 4 * id_list[i] +
3253  (h->ref_list[j][i].f.reference & 3);
3254  ref2frm[18 + 0] =
3255  ref2frm[18 + 1] = -1;
3256  for (i = 16; i < 48; i++)
3257  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
3258  (h->ref_list[j][i].f.reference & 3);
3259  }
3260 
3261  // FIXME: fix draw_edges + PAFF + frame threads
3263  (!h->sps.frame_mbs_only_flag &&
3265  ? 0 : 16;
3267 
3268  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
3269  av_log(h->s.avctx, AV_LOG_DEBUG,
3270  "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3271  h->slice_num,
3272  (s->picture_structure == PICT_FRAME ? "F" : s->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
3273  first_mb_in_slice,
3275  h->slice_type_fixed ? " fix" : "",
3276  h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
3277  pps_id, h->frame_num,
3280  h->ref_count[0], h->ref_count[1],
3281  s->qscale,
3282  h->deblocking_filter,
3283  h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26,
3284  h->use_weight,
3285  h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
3286  h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
3287  }
3288 
3289  return 0;
3290 }
3291 
/* Map the current slice's picture type to the numeric slice_type code used
 * by the H.264 bitstream syntax: P=0, B=1, I=2, SP=3, SI=4.
 * Returns -1 for an unrecognized picture type.
 * NOTE(review): the signature line was lost in extraction — presumably
 * "int ff_h264_get_slice_type(const H264Context *h)"; confirm upstream. */
{
    switch (h->slice_type) {
    case AV_PICTURE_TYPE_P:
        return 0;
    case AV_PICTURE_TYPE_B:
        return 1;
    case AV_PICTURE_TYPE_I:
        return 2;
    case AV_PICTURE_TYPE_SP:
        return 3;
    case AV_PICTURE_TYPE_SI:
        return 4;
    default:
        return -1;           /* not a valid H.264 slice type */
    }
}
3309 
/* Fill the motion-vector cache (mv_cache) and reference cache (ref_cache)
 * rows needed by the loop filter for one macroblock and one reference list,
 * pulling neighbour data (top row, left column) from current_picture and
 * remapping reference indices through the per-slice ref2frm tables.
 * NOTE(review): the opening line of this signature was lost in extraction —
 * presumably "static ... void fill_filter_caches_inter(H264Context *h,";
 * confirm against upstream h264.c. */
                                     MpegEncContext *const s,
                                     int mb_type, int top_xy,
                                     int left_xy[LEFT_MBS],
                                     int top_type,
                                     int left_type[LEFT_MBS],
                                     int mb_xy, int list)
{
    int b_stride = h->b_stride;
    int16_t(*mv_dst)[2] = &h->mv_cache[list][scan8[0]];   /* 4x4 MV cache for this MB */
    int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
    if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
        if (USES_LIST(top_type, list)) {
            /* Copy the bottom MV row of the top neighbour and remap its
             * reference indices via the top MB's slice-local ref2frm table. */
            const int b_xy  = h->mb2b_xy[top_xy] + 3 * b_stride;
            const int b8_xy = 4 * top_xy + 2;
            int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
            AV_COPY128(mv_dst - 1 * 8, s->current_picture.f.motion_val[list][b_xy + 0]);
            ref_cache[0 - 1 * 8] =
            ref_cache[1 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 0]];
            ref_cache[2 - 1 * 8] =
            ref_cache[3 - 1 * 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 1]];
        } else {
            /* Top neighbour does not use this list: zero MVs, mark refs unused. */
            AV_ZERO128(mv_dst - 1 * 8);
            AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        }

        /* Left neighbour is only usable when its field/frame coding matches. */
        if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
            if (USES_LIST(left_type[LTOP], list)) {
                const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                const int b8_xy = 4 * left_xy[LTOP] + 1;
                int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
                AV_COPY32(mv_dst - 1 + 0, s->current_picture.f.motion_val[list][b_xy + b_stride * 0]);
                AV_COPY32(mv_dst - 1 + 8, s->current_picture.f.motion_val[list][b_xy + b_stride * 1]);
                AV_COPY32(mv_dst - 1 + 16, s->current_picture.f.motion_val[list][b_xy + b_stride * 2]);
                AV_COPY32(mv_dst - 1 + 24, s->current_picture.f.motion_val[list][b_xy + b_stride * 3]);
                ref_cache[-1 + 0] =
                ref_cache[-1 + 8] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2 * 0]];
                ref_cache[-1 + 16] =
                ref_cache[-1 + 24] = ref2frm[list][s->current_picture.f.ref_index[list][b8_xy + 2 * 1]];
            } else {
                AV_ZERO32(mv_dst - 1 + 0);
                AV_ZERO32(mv_dst - 1 + 8);
                AV_ZERO32(mv_dst - 1 + 16);
                AV_ZERO32(mv_dst - 1 + 24);
                ref_cache[-1 + 0] =
                ref_cache[-1 + 8] =
                ref_cache[-1 + 16] =
                ref_cache[-1 + 24] = LIST_NOT_USED;
            }
        }
    }

    if (!USES_LIST(mb_type, list)) {
        /* Current MB does not use this list: fill caches with "unused". */
        fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
        AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        return;
    }

    {
        /* Remap and replicate the current MB's four 8x8 reference indices
         * into the 4x4 ref_cache grid (two 8x8 refs per cache row). */
        int8_t *ref = &s->current_picture.f.ref_index[list][4 * mb_xy];
        int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
        uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
        uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
        AV_WN32A(&ref_cache[0 * 8], ref01);
        AV_WN32A(&ref_cache[1 * 8], ref01);
        AV_WN32A(&ref_cache[2 * 8], ref23);
        AV_WN32A(&ref_cache[3 * 8], ref23);
    }

    {
        /* Copy the current MB's own 4x4 motion vectors row by row. */
        int16_t(*mv_src)[2] = &s->current_picture.f.motion_val[list][4 * s->mb_x + 4 * s->mb_y * b_stride];
        AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
        AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
        AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
        AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
    }
}
3390 
/**
 * Prepare the per-macroblock caches used by the loop filter: neighbour
 * MB coordinates/types, non-zero-count cache, and (for inter MBs) the
 * motion/reference caches via fill_filter_caches_inter().
 *
 * @param mb_type the current macroblock's type
 * @return non-zero if filtering can be skipped entirely for this MB
 *         (qp of MB and neighbours below threshold), 0 otherwise.
 */
static int fill_filter_caches(H264Context *h, int mb_type)
{
    MpegEncContext *const s = &h->s;
    const int mb_xy = h->mb_xy;
    int top_xy, left_xy[LEFT_MBS];
    int top_type, left_type[LEFT_MBS];
    uint8_t *nnz;
    uint8_t *nnz_cache;

    top_xy = mb_xy - (s->mb_stride << MB_FIELD);

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    if (FRAME_MBAFF) {
        /* In MBAFF frames the top/left neighbours depend on whether the
         * current and neighbouring MB pairs are field- or frame-coded. */
        const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (s->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LTOP] -= s->mb_stride;
        } else {
            if (curr_mb_field_flag)
                top_xy += s->mb_stride &
                          (((s->current_picture.f.mb_type[top_xy] >> 7) & 1) - 1);
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LBOT] += s->mb_stride;
        }
    }

    h->top_mb_xy        = top_xy;
    h->left_mb_xy[LTOP] = left_xy[LTOP];
    h->left_mb_xy[LBOT] = left_xy[LBOT];
    {
        /* For sufficiently low qp, filtering wouldn't do anything.
         * This is a conservative estimate: could also check beta_offset
         * and more accurate chroma_qp. */
        int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
        int qp = s->current_picture.f.qscale_table[mb_xy];
        if (qp <= qp_thresh &&
            (left_xy[LTOP] < 0 ||
             ((qp + s->current_picture.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
            (top_xy < 0 ||
             ((qp + s->current_picture.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
            if (!FRAME_MBAFF)
                return 1;
            /* MBAFF: also check the bottom-left and the MB-pair above. */
            if ((left_xy[LTOP] < 0 ||
                 ((qp + s->current_picture.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
                (top_xy < s->mb_stride ||
                 ((qp + s->current_picture.f.qscale_table[top_xy - s->mb_stride] + 1) >> 1) <= qp_thresh))
                return 1;
        }
    }

    top_type        = s->current_picture.f.mb_type[top_xy];
    left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
    left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
    if (h->deblocking_filter == 2) {
        /* deblocking mode 2: do not filter across slice boundaries, so
         * neighbours from another slice are treated as unavailable. */
        if (h->slice_table[top_xy] != h->slice_num)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] != h->slice_num)
            left_type[LTOP] = left_type[LBOT] = 0;
    } else {
        /* 0xFFFF in slice_table marks MBs outside any decoded slice. */
        if (h->slice_table[top_xy] == 0xFFFF)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
            left_type[LTOP] = left_type[LBOT] = 0;
    }
    h->top_type        = top_type;
    h->left_type[LTOP] = left_type[LTOP];
    h->left_type[LBOT] = left_type[LBOT];

    if (IS_INTRA(mb_type))
        return 0;

    fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy,
                             top_type, left_type, mb_xy, 0);
    if (h->list_count == 2)
        fill_filter_caches_inter(h, s, mb_type, top_xy, left_xy,
                                 top_type, left_type, mb_xy, 1);

    /* Load the non-zero-count values for the current MB and the neighbour
     * edges into the 8-wide nnz cache. */
    nnz       = h->non_zero_count[mb_xy];
    nnz_cache = h->non_zero_count_cache;
    AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
    AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
    AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
    AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
    h->cbp = h->cbp_table[mb_xy];

    if (top_type) {
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
    }

    if (left_type[LTOP]) {
        nnz = h->non_zero_count[left_xy[LTOP]];
        nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
        nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
        nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
        nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
    }

    /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
     * from what the loop filter needs */
    if (!CABAC && h->pps.transform_8x8_mode) {
        if (IS_8x8DCT(top_type)) {
            nnz_cache[4 + 8 * 0] =
            nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
            nnz_cache[6 + 8 * 0] =
            nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
        }
        if (IS_8x8DCT(left_type[LTOP])) {
            nnz_cache[3 + 8 * 1] =
            nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
        }
        if (IS_8x8DCT(left_type[LBOT])) {
            nnz_cache[3 + 8 * 3] =
            nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
        }

        if (IS_8x8DCT(mb_type)) {
            /* Each 8x8 block's nnz is derived from a cbp bit, replicated
             * over its four 4x4 sub-blocks. */
            nnz_cache[scan8[0]] =
            nnz_cache[scan8[1]] =
            nnz_cache[scan8[2]] =
            nnz_cache[scan8[3]] = (h->cbp & 0x1000) >> 12;

            nnz_cache[scan8[0 + 4]] =
            nnz_cache[scan8[1 + 4]] =
            nnz_cache[scan8[2 + 4]] =
            nnz_cache[scan8[3 + 4]] = (h->cbp & 0x2000) >> 12;

            nnz_cache[scan8[0 + 8]] =
            nnz_cache[scan8[1 + 8]] =
            nnz_cache[scan8[2 + 8]] =
            nnz_cache[scan8[3 + 8]] = (h->cbp & 0x4000) >> 12;

            nnz_cache[scan8[0 + 12]] =
            nnz_cache[scan8[1 + 12]] =
            nnz_cache[scan8[2 + 12]] =
            nnz_cache[scan8[3 + 12]] = (h->cbp & 0x8000) >> 12;
        }
    }

    return 0;
}
3540 
/**
 * Run the in-loop deblocking filter over macroblock columns
 * [start_x, end_x) of the current MB row (both rows of the pair when
 * FRAME_MBAFF). Saves and restores the per-MB decoder state it clobbers
 * (slice_num, chroma_qp, mb_x/mb_y).
 */
static void loop_filter(H264Context *h, int start_x, int end_x)
{
    MpegEncContext *const s = &h->s;
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize, mb_x, mb_y;
    const int end_mb_y       = s->mb_y + FRAME_MBAFF;
    const int old_slice_type = h->slice_type;   /* restored at the end */
    const int pixel_shift    = h->pixel_shift;
    const int block_h        = 16 >> s->chroma_y_shift;

    if (h->deblocking_filter) {
        for (mb_x = start_x; mb_x < end_x; mb_x++)
            for (mb_y = end_mb_y - FRAME_MBAFF; mb_y <= end_mb_y; mb_y++) {
                int mb_xy, mb_type;
                mb_xy         = h->mb_xy = mb_x + mb_y * s->mb_stride;
                h->slice_num  = h->slice_table[mb_xy];
                mb_type       = s->current_picture.f.mb_type[mb_xy];
                h->list_count = h->list_counts[mb_xy];

                if (FRAME_MBAFF)
                    h->mb_mbaff               =
                    h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);

                s->mb_x = mb_x;
                s->mb_y = mb_y;
                /* Compute plane pointers for this MB (luma, two chroma). */
                dest_y  = s->current_picture.f.data[0] +
                          ((mb_x << pixel_shift) + mb_y * s->linesize) * 16;
                dest_cb = s->current_picture.f.data[1] +
                          (mb_x << pixel_shift) * (8 << CHROMA444) +
                          mb_y * s->uvlinesize * block_h;
                dest_cr = s->current_picture.f.data[2] +
                          (mb_x << pixel_shift) * (8 << CHROMA444) +
                          mb_y * s->uvlinesize * block_h;
                // FIXME simplify above

                if (MB_FIELD) {
                    /* Field MB: double the strides and, for the bottom
                     * field, step back to the field's first line. */
                    linesize   = h->mb_linesize   = s->linesize   * 2;
                    uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
                    if (mb_y & 1) { // FIXME move out of this function?
                        dest_y  -= s->linesize   * 15;
                        dest_cb -= s->uvlinesize * (block_h - 1);
                        dest_cr -= s->uvlinesize * (block_h - 1);
                    }
                } else {
                    linesize   = h->mb_linesize   = s->linesize;
                    uvlinesize = h->mb_uvlinesize = s->uvlinesize;
                }
                backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
                                 uvlinesize, 0);
                /* Non-zero return: filtering can be skipped for this MB. */
                if (fill_filter_caches(h, mb_type))
                    continue;
                h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]);
                h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mb_xy]);

                if (FRAME_MBAFF) {
                    ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
                                      linesize, uvlinesize);
                } else {
                    ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb,
                                           dest_cr, linesize, uvlinesize);
                }
            }
    }
    /* Restore the state clobbered above. */
    h->slice_type   = old_slice_type;
    s->mb_x         = end_x;
    s->mb_y         = end_mb_y - FRAME_MBAFF;
    h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
    h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
}
3610 
/* Predict the field decoding flag of the current MB from an already
 * decoded neighbour of the same slice (left first, then top); defaults
 * to frame (0) when neither neighbour belongs to this slice.
 * NOTE(review): the signature line was lost in extraction — presumably
 * "static void predict_field_decoding_flag(H264Context *h)"; confirm. */
{
    MpegEncContext *const s = &h->s;
    const int mb_xy = s->mb_x + s->mb_y * s->mb_stride;
    int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
                  s->current_picture.f.mb_type[mb_xy - 1] :
                  (h->slice_table[mb_xy - s->mb_stride] == h->slice_num) ?
                  s->current_picture.f.mb_type[mb_xy - s->mb_stride] : 0;
    h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}
3621 
/* Draw edges and report progress for the last decoded row of MBs.
 * Extends the reported region by a deblocking border when the filter is
 * active, clamps it to the picture, then calls ff_draw_horiz_band() and
 * (for non-droppable frames) ff_thread_report_progress().
 * NOTE(review): the signature line was lost in extraction — presumably
 * "static void decode_finish_row(H264Context *h)"; confirm. */
{
    MpegEncContext *const s = &h->s;
    int top            = 16 * (s->mb_y >> FIELD_PICTURE);
    int pic_height     = 16 * s->mb_height >> FIELD_PICTURE;
    int height         = 16 << FRAME_MBAFF;
    int deblock_border = (16 + 4) << FRAME_MBAFF;

    if (h->deblocking_filter) {
        /* The filter may still modify pixels above this row. */
        if ((top + height) >= pic_height)
            height += deblock_border;
        top -= deblock_border;
    }

    if (top >= pic_height || (top + height) < h->emu_edge_height)
        return;

    height = FFMIN(height, pic_height - top);
    if (top < h->emu_edge_height) {
        height = top + height;
        top    = 0;
    }

    ff_draw_horiz_band(s, top, height);

    if (s->droppable)
        return;

    ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1,
                              /* [extraction gap: final argument lost — in
                               * upstream this is the field selector;
                               * confirm against original source] */
}
3656 
/* Decode all macroblocks of one slice; used directly and as the worker
 * callback for avctx->execute() in sliced threading (arg is H264Context**).
 * Dispatches to the CABAC or CAVLC MB decoder depending on pps.cabac and
 * runs the loop filter as MB rows complete.
 * NOTE(review): several original lines were lost in extraction; the spots
 * are marked "[extraction gap]" below — restore from upstream h264.c. */
static int decode_slice(struct AVCodecContext *avctx, void *arg)
{
    H264Context *h = *(void **)arg;
    MpegEncContext *const s = &h->s;
    const int part_mask = s->partitioned_frame ? (ER_AC_END | ER_AC_ERROR)
                                               : 0x7F;
    int lf_x_start = s->mb_x;

    s->mb_skip_run = -1;

    /* [extraction gap: left-hand side of this assignment lost] */
                    s->codec_id != AV_CODEC_ID_H264 ||
                    (CONFIG_GRAY && (s->flags & CODEC_FLAG_GRAY));

    if (h->pps.cabac) {
        /* realign */
        align_get_bits(&s->gb);

        /* init cabac */
        /* [extraction gap: CABAC decoder init call lost] */
                             s->gb.buffer + get_bits_count(&s->gb) / 8,
                             (get_bits_left(&s->gb) + 7) / 8);

        for (;;) {
            // START_TIMER
            int ret = ff_h264_decode_mb_cabac(h);
            int eos;
            // STOP_TIMER("decode_mb_cabac")

            if (ret >= 0)
                /* [extraction gap: MB reconstruction call lost] */

            // FIXME optimal? or let mb_decode decode 16x32 ?
            if (ret >= 0 && FRAME_MBAFF) {
                s->mb_y++;

                ret = ff_h264_decode_mb_cabac(h);

                if (ret >= 0)
                    /* [extraction gap: MB reconstruction call lost] */
                s->mb_y--;
            }
            eos = get_cabac_terminate(&h->cabac);

            if ((s->workaround_bugs & FF_BUG_TRUNCATED) &&
                h->cabac.bytestream > h->cabac.bytestream_end + 2) {
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
                                s->mb_y, ER_MB_END & part_mask);
                if (s->mb_x >= lf_x_start)
                    loop_filter(h, lf_x_start, s->mb_x + 1);
                return 0;
            }
            if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
                av_log(h->s.avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d, bytestream (%td)\n",
                       s->mb_x, s->mb_y,
                       /* [extraction gap: remaining av_log argument and
                        * ff_er_add_slice() call start lost] */
                                s->mb_y, ER_MB_ERROR & part_mask);
                return -1;
            }

            if (++s->mb_x >= s->mb_width) {
                /* End of MB row: filter it and advance. */
                loop_filter(h, lf_x_start, s->mb_x);
                s->mb_x = lf_x_start = 0;
                decode_finish_row(h);
                ++s->mb_y;
                if (FIELD_OR_MBAFF_PICTURE) {
                    ++s->mb_y;
                    if (FRAME_MBAFF && s->mb_y < s->mb_height)
                        /* [extraction gap: field flag prediction call lost] */
                }
            }

            if (eos || s->mb_y >= s->mb_height) {
                tprintf(s->avctx, "slice end %d %d\n",
                        get_bits_count(&s->gb), s->gb.size_in_bits);
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1,
                                s->mb_y, ER_MB_END & part_mask);
                if (s->mb_x > lf_x_start)
                    loop_filter(h, lf_x_start, s->mb_x);
                return 0;
            }
        }
    } else {
        for (;;) {
            int ret = ff_h264_decode_mb_cavlc(h);

            if (ret >= 0)
                /* [extraction gap: MB reconstruction call lost] */

            // FIXME optimal? or let mb_decode decode 16x32 ?
            if (ret >= 0 && FRAME_MBAFF) {
                s->mb_y++;
                ret = ff_h264_decode_mb_cavlc(h);

                if (ret >= 0)
                    /* [extraction gap: MB reconstruction call lost] */
                s->mb_y--;
            }

            if (ret < 0) {
                av_log(h->s.avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                /* [extraction gap: ff_er_add_slice() call start lost] */
                                s->mb_y, ER_MB_ERROR & part_mask);
                return -1;
            }

            if (++s->mb_x >= s->mb_width) {
                loop_filter(h, lf_x_start, s->mb_x);
                s->mb_x = lf_x_start = 0;
                decode_finish_row(h);
                ++s->mb_y;
                if (FIELD_OR_MBAFF_PICTURE) {
                    ++s->mb_y;
                    if (FRAME_MBAFF && s->mb_y < s->mb_height)
                        /* [extraction gap: field flag prediction call lost] */
                }
                if (s->mb_y >= s->mb_height) {
                    tprintf(s->avctx, "slice end %d %d\n",
                            get_bits_count(&s->gb), s->gb.size_in_bits);

                    if (get_bits_left(&s->gb) == 0) {
                        /* [extraction gap: ff_er_add_slice() call start lost] */
                                        s->mb_x - 1, s->mb_y,
                                        ER_MB_END & part_mask);

                        return 0;
                    } else {
                        /* [extraction gap: ff_er_add_slice() call start lost] */
                                        s->mb_x - 1, s->mb_y,
                                        ER_MB_END & part_mask);

                        return -1;
                    }
                }
            }

            if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0) {
                tprintf(s->avctx, "slice end %d %d\n",
                        get_bits_count(&s->gb), s->gb.size_in_bits);
                if (get_bits_left(&s->gb) == 0) {
                    /* [extraction gap: ff_er_add_slice() call start lost] */
                                    s->mb_x - 1, s->mb_y,
                                    ER_MB_END & part_mask);
                    if (s->mb_x > lf_x_start)
                        loop_filter(h, lf_x_start, s->mb_x);

                    return 0;
                } else {
                    /* [extraction gap: ff_er_add_slice() call start lost] */
                                    s->mb_y, ER_MB_ERROR & part_mask);

                    return -1;
                }
            }
        }
    }
}
3820 
/**
 * Run decode_slice() for the queued slice contexts: directly when there is
 * only one, otherwise via avctx->execute() (slice threading), then merge
 * position/error state from the worker contexts back into the master.
 *
 * @param context_count number of slice contexts queued for decoding
 * @return 0 (errors from individual slices are accumulated in error_count)
 */
static int execute_decode_slices(H264Context *h, int context_count)
{
    MpegEncContext *const s     = &h->s;
    AVCodecContext *const avctx = s->avctx;
    H264Context *hx;
    int i;

    if (s->avctx->hwaccel ||
        /* [extraction gap: second condition of this || lost] */
        return 0;
    if (context_count == 1) {
        return decode_slice(avctx, &h);
    } else {
        for (i = 1; i < context_count; i++) {
            hx                    = h->thread_context[i];
            hx->s.err_recognition = avctx->err_recognition;
            hx->s.error_count     = 0;
        }

        avctx->execute(avctx, decode_slice, h->thread_context,
                       NULL, context_count, sizeof(void *));

        /* pull back stuff from slices to master context */
        hx           = h->thread_context[context_count - 1];
        s->mb_x      = hx->s.mb_x;
        s->mb_y      = hx->s.mb_y;
        s->droppable = hx->s.droppable;
        /* [extraction gap: one more field copied back here — lost] */
        for (i = 1; i < context_count; i++)
            h->s.error_count += h->thread_context[i]->s.error_count;
    }

    return 0;
}
3861 
/* Split the input buffer into NAL units (length-prefixed when h->is_avc,
 * start-code delimited otherwise) and dispatch each unit by type: slice
 * headers/data, partitions (DPA/DPB/DPC), SEI, SPS, PPS, etc. With frame
 * threading a first pass only counts the NALs needed before decoding can
 * start (nals_needed), then a second pass decodes.
 * @param parse_extradata non-zero when called on codec extradata; only
 *        SPS/PPS are then honoured.
 * @return number of bytes consumed, or -1 on error.
 * NOTE(review): several original lines were lost in extraction; the spots
 * are marked "[extraction gap]" below — restore from upstream h264.c. */
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
                            int parse_extradata)
{
    MpegEncContext *const s = &h->s;
    AVCodecContext *const avctx = s->avctx;
    H264Context *hx;
    int buf_index;
    int context_count;
    int next_avc;
    int pass = !(avctx->active_thread_type & FF_THREAD_FRAME);
    int nals_needed = 0;
    int nal_index;

    /* [extraction gap] */
    if (!(s->flags2 & CODEC_FLAG2_CHUNKS)) {
        h->current_slice = 0;
        if (!s->first_field)
            /* [extraction gap: body of this if lost] */
        ff_h264_reset_sei(h);
    }

    for (; pass <= 1; pass++) {
        buf_index     = 0;
        context_count = 0;
        next_avc      = h->is_avc ? 0 : buf_size;
        nal_index     = 0;
        for (;;) {
            int consumed;
            int dst_length;
            int bit_length;
            const uint8_t *ptr;
            int i, nalsize = 0;
            int err;

            if (buf_index >= next_avc) {
                /* AVC format: read the NAL size prefix. */
                if (buf_index >= buf_size - h->nal_length_size)
                    break;
                nalsize = 0;
                for (i = 0; i < h->nal_length_size; i++)
                    nalsize = (nalsize << 8) | buf[buf_index++];
                if (nalsize <= 0 || nalsize > buf_size - buf_index) {
                    av_log(h->s.avctx, AV_LOG_ERROR,
                           "AVC: nal size %d\n", nalsize);
                    break;
                }
                next_avc = buf_index + nalsize;
            } else {
                // start code prefix search
                for (; buf_index + 3 < next_avc; buf_index++)
                    // This should always succeed in the first iteration.
                    if (buf[buf_index]     == 0 &&
                        buf[buf_index + 1] == 0 &&
                        buf[buf_index + 2] == 1)
                        break;

                if (buf_index + 3 >= buf_size) {
                    buf_index = buf_size;
                    break;
                }

                buf_index += 3;
                if (buf_index >= next_avc)
                    continue;
            }

            hx = h->thread_context[context_count];

            ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
                                     &consumed, next_avc - buf_index);
            if (ptr == NULL || dst_length < 0) {
                buf_index = -1;
                goto end;
            }
            i = buf_index + consumed;
            /* Detect a known truncated-stream bug pattern. */
            if ((s->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
                buf[i]     == 0x00 && buf[i + 1] == 0x00 &&
                buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
                /* [extraction gap: body of this if lost] */

            if (!(s->workaround_bugs & FF_BUG_TRUNCATED))
                while (ptr[dst_length - 1] == 0 && dst_length > 0)
                    dst_length--;
            bit_length = !dst_length ? 0
                                     : (8 * dst_length -
                                        decode_rbsp_trailing(h, ptr + dst_length - 1));

            if (s->avctx->debug & FF_DEBUG_STARTCODE)
                av_log(h->s.avctx, AV_LOG_DEBUG,
                       "NAL %d at %d/%d length %d\n",
                       hx->nal_unit_type, buf_index, buf_size, dst_length);

            if (h->is_avc && (nalsize != consumed) && nalsize)
                av_log(h->s.avctx, AV_LOG_DEBUG,
                       "AVC: Consumed only %d bytes instead of %d\n",
                       consumed, nalsize);

            buf_index += consumed;
            nal_index++;

            if (pass == 0) {
                /* packets can sometimes contain multiple PPS/SPS,
                 * e.g. two PAFF field pictures in one packet, or a demuxer
                 * which splits NALs strangely if so, when frame threading we
                 * can't start the next thread until we've read all of them */
                switch (hx->nal_unit_type) {
                case NAL_SPS:
                case NAL_PPS:
                    nals_needed = nal_index;
                    break;
                case NAL_DPA:
                case NAL_IDR_SLICE:
                case NAL_SLICE:
                    init_get_bits(&hx->s.gb, ptr, bit_length);
                    /* first_mb_in_slice == 0 marks the start of a picture */
                    if (!get_ue_golomb(&hx->s.gb))
                        nals_needed = nal_index;
                }
                continue;
            }

            // FIXME do not discard SEI id
            if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
                continue;

again:
            /* Ignore every NAL unit type except PPS and SPS during extradata
             * parsing. Decoding slices is not possible in codec init
             * with frame-mt */
            if (parse_extradata && HAVE_THREADS &&
                /* [extraction gap: one condition of this && lost] */
                (hx->nal_unit_type != NAL_PPS &&
                 hx->nal_unit_type != NAL_SPS)) {
                av_log(avctx, AV_LOG_INFO, "Ignoring NAL unit %d during "
                       "extradata parsing\n", hx->nal_unit_type);
                /* [extraction gap: statement lost] */
            }
            err = 0;
            switch (hx->nal_unit_type) {
            case NAL_IDR_SLICE:
                if (h->nal_unit_type != NAL_IDR_SLICE) {
                    av_log(h->s.avctx, AV_LOG_ERROR,
                           "Invalid mix of idr and non-idr slices\n");
                    buf_index = -1;
                    goto end;
                }
                idr(h); // FIXME ensure we don't lose some frames if there is reordering
                /* fallthrough */
            case NAL_SLICE:
                init_get_bits(&hx->s.gb, ptr, bit_length);
                hx->intra_gb_ptr        =
                hx->inter_gb_ptr        = &hx->s.gb;
                hx->s.data_partitioning = 0;

                if ((err = decode_slice_header(hx, h)))
                    break;

                /* [extraction gap: left-hand side of this assignment lost] */
                    (hx->nal_unit_type == NAL_IDR_SLICE) ||
                    (h->sei_recovery_frame_cnt >= 0);

                if (h->current_slice == 1) {
                    if (!(s->flags2 & CODEC_FLAG2_CHUNKS))
                        decode_postinit(h, nal_index >= nals_needed);

                    if (s->avctx->hwaccel &&
                        s->avctx->hwaccel->start_frame(s->avctx, NULL, 0) < 0)
                        return -1;
                    /* [extraction gap: VDPAU picture start lost] */
                }

                if (hx->redundant_pic_count == 0 &&
                    (avctx->skip_frame < AVDISCARD_NONREF ||
                     hx->nal_ref_idc) &&
                    (avctx->skip_frame < AVDISCARD_BIDIR ||
                     /* [extraction gap: condition lost] */
                    (avctx->skip_frame < AVDISCARD_NONKEY ||
                     /* [extraction gap: condition lost] */
                    avctx->skip_frame < AVDISCARD_ALL) {
                    if (avctx->hwaccel) {
                        if (avctx->hwaccel->decode_slice(avctx,
                                                         &buf[buf_index - consumed],
                                                         consumed) < 0)
                            return -1;
                    } else if (CONFIG_H264_VDPAU_DECODER &&
                               /* [extraction gap: condition lost] */
                        static const uint8_t start_code[] = {
                            0x00, 0x00, 0x01 };
                        ff_vdpau_add_data_chunk(s, start_code,
                                                sizeof(start_code));
                        ff_vdpau_add_data_chunk(s, &buf[buf_index - consumed],
                                                consumed);
                    } else
                        context_count++;
                }
                break;
            case NAL_DPA:
                /* Partition A: slice header + data; intra/inter parts follow. */
                init_get_bits(&hx->s.gb, ptr, bit_length);
                hx->intra_gb_ptr =
                hx->inter_gb_ptr = NULL;

                if ((err = decode_slice_header(hx, h)) < 0)
                    break;

                hx->s.data_partitioning = 1;
                break;
            case NAL_DPB:
                init_get_bits(&hx->intra_gb, ptr, bit_length);
                hx->intra_gb_ptr = &hx->intra_gb;
                break;
            case NAL_DPC:
                init_get_bits(&hx->inter_gb, ptr, bit_length);
                hx->inter_gb_ptr = &hx->inter_gb;

                /* Only decode when all partitions arrived and the frame is
                 * not being skipped. */
                if (hx->redundant_pic_count == 0 &&
                    hx->intra_gb_ptr &&
                    hx->s.data_partitioning &&
                    s->current_picture_ptr &&
                    s->context_initialized &&
                    (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
                    (avctx->skip_frame < AVDISCARD_BIDIR ||
                     /* [extraction gap: condition lost] */
                    (avctx->skip_frame < AVDISCARD_NONKEY ||
                     /* [extraction gap: condition lost] */
                    avctx->skip_frame < AVDISCARD_ALL)
                    context_count++;
                break;
            case NAL_SEI:
                init_get_bits(&s->gb, ptr, bit_length);
                ff_h264_decode_sei(h);
                break;
            case NAL_SPS:
                init_get_bits(&s->gb, ptr, bit_length);
                if (ff_h264_decode_seq_parameter_set(h) < 0 &&
                    h->is_avc && (nalsize != consumed) && nalsize) {
                    /* Some streams store the unescaped size; retry on the
                     * whole NAL. */
                    av_log(h->s.avctx, AV_LOG_DEBUG,
                           "SPS decoding failure, trying again with the complete NAL\n");
                    init_get_bits(&s->gb, buf + buf_index + 1 - consumed,
                                  8 * (nalsize - 1));
                    /* [extraction gap: retry decode call lost] */
                }

                if (h264_set_parameter_from_sps(h) < 0) {
                    buf_index = -1;
                    goto end;
                }
                break;
            case NAL_PPS:
                init_get_bits(&s->gb, ptr, bit_length);
                ff_h264_decode_picture_parameter_set(h, bit_length);
                break;
            case NAL_AUD:
            case NAL_END_SEQUENCE:
            case NAL_END_STREAM:
            case NAL_FILLER_DATA:
            case NAL_SPS_EXT:
            case NAL_AUXILIARY_SLICE:
                break;
            case NAL_FF_IGNORE:
                break;
            default:
                av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
                       hx->nal_unit_type, bit_length);
            }

            if (context_count == h->max_contexts) {
                execute_decode_slices(h, context_count);
                context_count = 0;
            }

            if (err < 0)
                av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
            else if (err == 1) {
                /* Slice could not be decoded in parallel mode, copy down
                 * NAL unit stuff to context 0 and restart. Note that
                 * rbsp_buffer is not transferred, but since we no longer
                 * run in parallel mode this should not be an issue. */
                h->nal_unit_type = hx->nal_unit_type;
                h->nal_ref_idc   = hx->nal_ref_idc;
                hx               = h;
                goto again;
            }
        }
    }
    if (context_count)
        execute_decode_slices(h, context_count);

end:
    /* clean up */
    if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s &&
        !s->droppable) {
        /* [extraction gap: final progress report call lost] */
    }

    return buf_index;
}
4158 
4162 static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size)
4163 {
4164  if (pos == 0)
4165  pos = 1; // avoid infinite loops (i doubt that is needed but ...)
4166  if (pos + 10 > buf_size)
4167  pos = buf_size; // oops ;)
4168 
4169  return pos;
4170 }
4171 
/* AVCodec.decode callback: decode one packet of H.264 data into *pict.
 * A zero-sized packet flushes: the lowest-poc picture still queued in
 * delayed_pic is returned. Returns bytes consumed or -1 on error.
 * NOTE(review): two original lines were lost in extraction; the spots are
 * marked "[extraction gap]" below — restore from upstream h264.c. */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H264Context *h     = avctx->priv_data;
    MpegEncContext *s  = &h->s;
    AVFrame *pict      = data;
    int buf_index      = 0;

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;

    /* end of stream, output what is still in the buffers */
out:
    if (buf_size == 0) {
        Picture *out;
        int i, out_idx;

        /* [extraction gap: one statement lost] */

        // FIXME factorize this with the output code below
        /* Pick the queued picture with the smallest poc, stopping at the
         * first keyframe / mmco reset boundary. */
        out     = h->delayed_pic[0];
        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             !h->delayed_pic[i]->f.key_frame &&
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out     = h->delayed_pic[i];
                out_idx = i;
            }

        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        if (out) {
            *got_frame = 1;
            *pict      = out->f;
        }

        return buf_index;
    }

    buf_index = decode_nal_units(h, buf, buf_size, 0);
    if (buf_index < 0)
        return -1;

    /* [extraction gap: the opening "if (...) {" of this branch lost] */
        buf_size = 0;
        goto out;
    }

    if (!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr) {
        if (avctx->skip_frame >= AVDISCARD_NONREF)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return -1;
    }

    if (!(s->flags2 & CODEC_FLAG2_CHUNKS) ||
        (s->mb_y >= s->mb_height && s->mb_height)) {
        if (s->flags2 & CODEC_FLAG2_CHUNKS)
            decode_postinit(h, 1);

        field_end(h, 0);
        h->context_reinitialized = 0;

        if (!h->next_output_pic) {
            /* Wait for second field. */
            *got_frame = 0;
        } else {
            *got_frame = 1;
            *pict      = h->next_output_pic->f;
        }
    }

    assert(pict->data[0] || !*got_frame);
    ff_print_debug_info(s, pict);

    return get_consumed_bytes(s, buf_index, buf_size);
}
4255 
/* Free the per-context tables and all stored SPS/PPS parameter sets.
 * NOTE(review): the signature line was lost in extraction — presumably
 * "av_cold void ff_h264_free_context(H264Context *h)"; confirm. */
{
    int i;

    free_tables(h, 1); // FIXME cleanup init stuff perhaps

    for (i = 0; i < MAX_SPS_COUNT; i++)
        av_freep(h->sps_buffers + i);

    for (i = 0; i < MAX_PPS_COUNT; i++)
        av_freep(h->pps_buffers + i);
}
4268 
/* AVCodec.close callback: release decoder resources.
 * NOTE(review): the signature line was lost in extraction — presumably
 * "static av_cold int h264_decode_end(AVCodecContext *avctx)" — and one
 * cleanup call is missing below; confirm against upstream h264.c. */
{
    H264Context *h    = avctx->priv_data;
    MpegEncContext *s = &h->s;

    /* [extraction gap: one cleanup call lost] */

    ff_MPV_common_end(s);

    // memset(h, 0, sizeof(H264Context));

    return 0;
}
4282 
/* H.264 profiles known to this decoder, mapped to display names;
 * terminated by FF_PROFILE_UNKNOWN. Exported through the .profiles
 * field of the AVCodec definitions below. */
static const AVProfile profiles[] = {
    { FF_PROFILE_H264_BASELINE,             "Baseline"              },
    { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline"  },
    { FF_PROFILE_H264_MAIN,                 "Main"                  },
    { FF_PROFILE_H264_EXTENDED,             "Extended"              },
    { FF_PROFILE_H264_HIGH,                 "High"                  },
    { FF_PROFILE_H264_HIGH_10,              "High 10"               },
    { FF_PROFILE_H264_HIGH_10_INTRA,        "High 10 Intra"         },
    { FF_PROFILE_H264_HIGH_422,             "High 4:2:2"            },
    { FF_PROFILE_H264_HIGH_422_INTRA,       "High 4:2:2 Intra"      },
    { FF_PROFILE_H264_HIGH_444,             "High 4:4:4"            },
    { FF_PROFILE_H264_HIGH_444_PREDICTIVE,  "High 4:4:4 Predictive" },
    { FF_PROFILE_H264_HIGH_444_INTRA,       "High 4:4:4 Intra"      },
    { FF_PROFILE_H264_CAVLC_444,            "CAVLC 4:4:4"           },
    { FF_PROFILE_UNKNOWN },
};
4299 
/* Software H.264 decoder registration.
 * NOTE(review): the opening line of this initializer (presumably
 * "AVCodec ff_h264_decoder = {") and several fields (.init/.close, the
 * remaining capability flags) were lost in extraction — restore from
 * upstream h264.c. */
    .name                  = "h264",
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_H264,
    .priv_data_size        = sizeof(H264Context),
    .decode                = decode_frame,
    .capabilities          = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
                             /* [extraction gap: remaining capability flags lost] */
    .flush                 = flush_dpb,
    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(profiles),
};
4317 
#if CONFIG_H264_VDPAU_DECODER
/* VDPAU-accelerated H.264 decoder registration (only built when VDPAU
 * support is configured).
 * NOTE(review): some fields (.init/.close, .capabilities) were lost in
 * extraction — restore from upstream h264.c. */
AVCodec ff_h264_vdpau_decoder = {
    .name           = "h264_vdpau",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(H264Context),
    /* [extraction gap: .init/.close and .capabilities fields lost] */
    .decode         = decode_frame,
    .flush          = flush_dpb,
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
                                                     AV_PIX_FMT_NONE},
    .profiles       = NULL_IF_CONFIG_SMALL(profiles),
};
#endif
int chroma_format_idc
Definition: h264.h:150
Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:486
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:640
void ff_h264_direct_dist_scale_factor(H264Context *const h)
Definition: h264_direct.c:52
enum AVPixelFormat ff_hwaccel_pixfmt_list_420[]
Definition: mpegvideo.c:133
GetBitContext inter_gb
Definition: h264.h:385
#define XCHG(a, b, xchg)
int video_signal_type_present_flag
Definition: h264.h:173
#define VERT_PRED8x8
Definition: h264pred.h:70
void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
Definition: vdpau.c:41
int last_slice_type
Definition: h264.h:530
int ff_h264_decode_mb_cabac(H264Context *h)
Decode a CABAC coded macroblock.
Definition: h264_cabac.c:1861
static void clone_tables(H264Context *dst, H264Context *src, int i)
Mimic alloc_tables(), but for every context thread.
Definition: h264.c:921
#define ER_AC_END
Definition: mpegvideo.h:498
const struct AVCodec * codec
Definition: avcodec.h:1348
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:61
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:305
#define PICT_TOP_FIELD
Definition: mpegvideo.h:639
discard all frames except keyframes
Definition: avcodec.h:535
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3106
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2656
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:61
unsigned int top_samples_available
Definition: h264.h:286
static enum PixelFormat get_pixel_format(H264Context *h)
Definition: h264.c:2466
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:3862
unsigned int topleft_samples_available
Definition: h264.h:285
#define DC_128_PRED8x8
Definition: h264pred.h:76
int single_decode_warning
1 if the single thread fallback warning has already been displayed, 0 otherwise.
Definition: h264.h:528
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:138
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:1777
#define VERT_LEFT_PRED
Definition: h264pred.h:45
const uint8_t ff_zigzag_direct[64]
Definition: dsputil.c:59
int size
GetBitContext * intra_gb_ptr
Definition: h264.h:386
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
Definition: mpegvideo.c:1283
This structure describes decoded (raw) audio or video data.
Definition: avcodec.h:989
int mb_aff_frame
Definition: h264.h:348
void ff_vdpau_h264_picture_complete(MpegEncContext *s)
Definition: vdpau.c:149
static void copy_parameter_set(void **to, void **from, int count, int size)
Definition: h264.c:1113
int delta_poc[2]
Definition: h264.h:464
#define IS_SUB_4X4(a)
Definition: mpegvideo.h:122
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:171
int last_qscale_diff
Definition: h264.h:406
#define LEFT_MBS
Definition: h264.h:66
mpeg2/4, h264 default
Definition: avcodec.h:585
#define CONFIG_SVQ3_DECODER
Definition: config.h:491
int cbp
Definition: h264.h:401
3: top field, bottom field, in that order
Definition: h264.h:136
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:170
av_cold int ff_MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:882
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:70
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:237
int weighted_bipred_idc
Definition: h264.h:215
int chroma_qp_index_offset[2]
Definition: h264.h:218
const uint8_t * bytestream_end
Definition: cabac.h:48
int left_type[LEFT_MBS]
Definition: h264.h:277
#define CHROMA422
Definition: h264.h:88
uint16_t * cbp_table
Definition: h264.h:400
int qscale_type
Definition: avcodec.h:1150
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1043
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:572
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
Definition: h264.h:496
static void align_get_bits(GetBitContext *s)
Definition: get_bits.h:412
7: frame doubling
Definition: h264.h:140
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op, h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:623
#define MAX_PPS_COUNT
Definition: h264.h:43
Sequence parameter set.
Definition: h264.h:147
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2079
static const uint8_t field_scan8x8[64]
Definition: h264data.h:115
static void init_dequant_tables(H264Context *h)
Definition: h264.c:848
int bitstream_restriction_flag
Definition: h264.h:184
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:154
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:213
#define FMO
Definition: h264.h:53
int num
numerator
Definition: rational.h:44
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:100
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: avcodec.h:1225
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:149
int size
Definition: avcodec.h:916
enum AVCodecID codec_id
Definition: mpegvideo.h:227
int outputed_poc
Definition: h264.h:490
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:123
const uint8_t * buffer
Definition: get_bits.h:53
Picture parameter set.
Definition: h264.h:207
void * thread_opaque
used by multithreading to store frame-specific info
Definition: avcodec.h:1294
static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth, int index)
Definition: h264.c:1759
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:139
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1724
#define pass
Definition: fft.c:334
const uint8_t * field_scan8x8_q0
Definition: h264.h:422
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1533
int frame_mbs_only_flag
Definition: h264.h:163
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:55
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:447
int mmco_index
Definition: h264.h:497
static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264.c:1150
uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264.h:414
mpegvideo header.
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:2393
uint32_t dequant8_buffer[6][QP_MAX_NUM+1][64]
Definition: h264.h:337
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:302
H264Context.
Definition: h264.h:254
discard all
Definition: avcodec.h:536
static int context_init(H264Context *h)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:944
int mmco_reset
h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET...
Definition: mpegvideo.h:132
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:105
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:466
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2711
uint32_t num_units_in_tick
Definition: h264.h:180
struct H264Context H264Context
H264Context.
4: bottom field, top field, in that order
Definition: h264.h:137
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:252
static int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list)
Definition: h264.c:295
int profile
profile
Definition: avcodec.h:2815
#define HOR_PRED8x8
Definition: h264pred.h:69
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:2960
void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc)
Definition: svq3.c:175
int qscale
QP.
Definition: mpegvideo.h:342
#define AV_WN32A(p, v)
Definition: intreadwrite.h:458
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:344
#define AV_COPY32(d, s)
Definition: intreadwrite.h:506
static const uint8_t rem6[QP_MAX_NUM+1]
Definition: h264.c:50
#define IS_INTRA_PCM(a)
Definition: mpegvideo.h:111
int profile_idc
Definition: h264.h:148
unsigned current_sps_id
id of the current SPS
Definition: h264.h:328
static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc)
Definition: h264.c:461
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:753
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:229
static const uint8_t zigzag_scan[16]
Definition: h264data.h:55
void ff_h264_init_cabac_states(H264Context *h)
Definition: h264_cabac.c:1262
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:126
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1465
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:151
Switching Intra.
Definition: avutil.h:249
uint8_t * chroma_pred_mode_table
Definition: h264.h:405
#define IS_DIR(a, part, list)
Definition: mpegvideo.h:125
static const uint8_t div6[QP_MAX_NUM+1]
Definition: h264.c:56
enum AVDiscard skip_frame
Definition: avcodec.h:2907
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
Definition: h264_refs.c:187
#define MAX_THREADS
Definition: mpegvideo.h:61
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
#define AV_RN32A(p)
Definition: intreadwrite.h:446
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2622
int long_ref
1->long term reference 0->short term reference
Definition: mpegvideo.h:135
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:1895
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:1128
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: utils.c:72
#define IS_8x8DCT(a)
Definition: h264.h:96
uint8_t scaling_matrix4[6][16]
Definition: h264.h:223
const uint8_t * bytestream
Definition: cabac.h:47
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
Definition: h264.h:381
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:50
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264.h:219
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:228
#define IS_INTER(a)
Definition: mpegvideo.h:109
DCTELEM mb_luma_dc[3][16 *2]
Definition: h264.h:390
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:338
int ff_MPV_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1069
uint8_t
int prev_frame_num_offset
for POC type 2
Definition: h264.h:469
int use_weight
Definition: h264.h:355
int mb_uvlinesize
Definition: h264.h:323
int full_range
Definition: h264.h:174
void(* h264_luma_dc_dequant_idct)(DCTELEM *output, DCTELEM *input, int qmul)
Definition: h264dsp.h:103
#define IS_8X16(a)
Definition: mpegvideo.h:117
int offset_for_non_ref_pic
Definition: h264.h:156
int context_reinitialized
Definition: h264.h:451
float delta
#define PICT_FRAME
Definition: mpegvideo.h:641
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:40
Definition: h264.h:110
int luma_weight[48][2][2]
Definition: h264.h:360
enum OutputFormat out_format
output format
Definition: mpegvideo.h:219
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
Definition: h264.h:198
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride)
Definition: dsputil.h:144
enum AVColorPrimaries color_primaries
Definition: h264.h:176
av_cold void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:326
void(* h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:82
DCTELEM mb[16 *48 *2]
as a dct coeffecient is int32_t in high depth, we need to reserve twice the space.
Definition: h264.h:389
AVCodec ff_h264_decoder
Definition: h264.c:4300
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:378
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int cabac
entropy_coding_mode_flag
Definition: h264.h:209
int mb_xy
Definition: h264.h:427
Definition: h264.h:108
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:198
#define LUMA_DC_BLOCK_INDEX
Definition: h264.h:733
#define DIAG_DOWN_LEFT_PRED
Definition: h264pred.h:41
static const uint8_t dequant8_coeff_init[6][6]
Definition: h264data.h:263
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:375
unsigned int crop_right
frame_cropping_rect_right_offset
Definition: h264.h:168
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1454
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:313
void ff_h264_fill_mbaff_ref_list(H264Context *h)
Definition: h264_refs.c:301
#define TOP_DC_PRED
Definition: h264pred.h:50
const char data[16]
Definition: mxf.c:66
int transform_bypass
qpprime_y_zero_transform_bypass_flag
Definition: h264.h:151
uint8_t * data
Definition: avcodec.h:915
static int init_poc(H264Context *h)
Definition: h264.c:2175
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:192
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:280
int left_mb_xy[LEFT_MBS]
Definition: h264.h:272
int top_mb_xy
Definition: h264.h:270
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:43
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:78
int ff_h264_get_slice_type(const H264Context *h)
Reconstruct bitstream slice_type.
Definition: h264.c:3292
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264.h:221
static const uint8_t dequant8_coeff_init_scan[16]
Definition: h264data.h:259
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:231
int interlaced_frame
The content of the picture is interlaced.
Definition: avcodec.h:1232
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:47
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:247
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1573
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define MB_MBAFF
Definition: h264.h:62
Picture * next_output_pic
Definition: h264.h:489
static av_cold void common_init(H264Context *h)
Definition: h264.c:967
#define AV_COPY64(d, s)
Definition: intreadwrite.h:510
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2086
int luma_log2_weight_denom
Definition: h264.h:357
#define IS_INTERLACED(a)
Definition: mpegvideo.h:112
static int h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:2413
int chroma_weight[48][2][2][2]
Definition: h264.h:361
static int init(AVCodecParserContext *s)
Definition: h264_parser.c:335
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:488
static void init_dequant4_coeff_table(H264Context *h)
Definition: h264.c:822
#define r
Definition: input.c:51
void(* pred8x8l_add[2])(uint8_t *pix, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:102
const uint8_t * zigzag_scan8x8_cavlc_q0
Definition: h264.h:420
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:465
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:547
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
Definition: pthread.c:702
enum AVCodecID id
Definition: avcodec.h:2974
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:289
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264.h:164
enum AVColorTransferCharacteristic color_trc
Definition: h264.h:177
H264PredContext hpc
Definition: h264.h:284
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:399
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:128
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1634
#define CONFIG_H264_VDPAU_DECODER
Definition: config.h:419
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
static int decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264.c:2597
const uint8_t * zigzag_scan_q0
Definition: h264.h:418
int poc_type
pic_order_cnt_type
Definition: h264.h:153
Multithreading support functions.
void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
Definition: h264dsp.h:36
static const uint16_t mask[17]
Definition: lzw.c:38
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:1978
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
Definition: avcodec.h:1132
int chroma_y_shift
Definition: mpegvideo.h:657
int nal_unit_type
Definition: h264.h:440
int use_weight_chroma
Definition: h264.h:356
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:567
int num_reorder_frames
Definition: h264.h:185
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:63
discard all bidirectional frames
Definition: avcodec.h:534
void ff_h264_direct_ref_list_init(H264Context *const h)
Definition: h264_direct.c:106
#define DC_128_PRED
Definition: h264pred.h:51
#define LEFT_DC_PRED
Definition: h264pred.h:49
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
Get the chroma qp.
Definition: h264.h:774
GetBitContext * inter_gb_ptr
Definition: h264.h:387
void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
Definition: svq3.c:140
static void copy_picture_range(Picture **to, Picture **from, int count, MpegEncContext *new_base, MpegEncContext *old_base)
Definition: h264.c:1098
#define ER_MB_ERROR
Definition: mpegvideo.h:502
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:358
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:88
Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:487
#define IS_SUB_8X4(a)
Definition: mpegvideo.h:120
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2752
int mb_field_decoding_flag
Definition: h264.h:349
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:2160
static int h264_slice_header_init(H264Context *, int)
Definition: h264.c:2517
int capabilities
Codec capabilities.
Definition: avcodec.h:2979
int emu_edge_width
Definition: h264.h:325
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:337
uint8_t * base[AV_NUM_DATA_POINTERS]
pointer to the first allocated byte of the picture.
Definition: avcodec.h:1073
#define s0
Definition: regdef.h:37
PPS pps
current pps
Definition: h264.h:334
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: mpegvideo.h:720
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:407
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:544
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1434
int direct_spatial_mv_pred
Definition: h264.h:364
0: frame
Definition: h264.h:133
simple assert() macros that are a bit more flexible than ISO C assert().
void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:36
int weighted_pred
weighted_pred_flag
Definition: h264.h:214
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:146
static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:4162
const char * name
Name of the codec implementation.
Definition: avcodec.h:2967
#define T(x)
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: dsputil.h:145
static void predict_field_decoding_flag(H264Context *h)
Definition: h264.c:3611
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:557
#define IS_INTRA(a)
Definition: mpegvideo.h:108
int ff_h264_decode_mb_cavlc(H264Context *h)
Decode a macroblock.
Definition: h264_cavlc.c:695
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:570
static int square(int x)
Definition: roqvideoenc.c:111
GetBitContext gb
Definition: mpegvideo.h:626
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:377
int delta_pic_order_always_zero_flag
Definition: h264.h:155
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:244
int new
flag to keep track if the decoder context needs re-init due to changed SPS
Definition: h264.h:201
int dct_bits
Size of DCT coefficients.
Definition: dsputil.h:198
int offset_for_top_to_bottom_field
Definition: h264.h:157
#define IN_RANGE(a, b, size)
Definition: h264.c:1096
int off
Definition: dsputil_bfin.c:28
uint8_t zigzag_scan8x8[64]
Definition: h264.h:413
int picture_count
number of allocated pictures (MAX_PICTURE_COUNT * avctx->thread_count)
Definition: mpegvideo.h:318
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:69
static const uint8_t scan8[16 *3+3]
Definition: h264.h:737
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2490
void(* add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size)
Definition: dsputil.h:206
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:128
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:505
the normal 2^n-1 "JPEG" YUV ranges
Definition: avcodec.h:574
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:36
uint8_t * direct_table
Definition: h264.h:409
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift)
Definition: h264.c:1670
uint8_t scaling_matrix8[6][64]
Definition: h264.h:224
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:448
useful rectangle filling function
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: avcodec.h:573
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int refs
number of reference frames
Definition: avcodec.h:2022
CABACContext cabac
Cabac.
Definition: h264.h:396
unsigned int left_samples_available
Definition: h264.h:288
#define IS_8X8(a)
Definition: mpegvideo.h:118
int err_recognition
Definition: mpegvideo.h:510
#define FRAME_MBAFF
Definition: h264.h:64
int ref_frame_count
num_ref_frames
Definition: h264.h:159
Picture * long_ref[32]
Definition: h264.h:485
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2981
static DCTELEM block[64]
Definition: dct-test.c:169
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
Definition: avcodec.h:1065
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
int frame_num_offset
for POC type 2
Definition: h264.h:468
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2602
int x264_build
Definition: h264.h:425
uint32_t * mb2br_xy
Definition: h264.h:319
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:342
uint8_t field_scan8x8_cavlc[64]
Definition: h264.h:417
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:158
int colour_description_present_flag
Definition: h264.h:175
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:77
int poc
h264 frame POC
Definition: mpegvideo.h:130
AVRational sar
Definition: h264.h:172
int redundant_pic_count
Definition: h264.h:482
static const uint8_t field_scan8x8_cavlc[64]
Definition: h264data.h:134
int width
picture width / height.
Definition: avcodec.h:1508
static av_always_inline void fill_filter_caches_inter(H264Context *h, MpegEncContext *const s, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264.c:3310
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:317
int long_ref_count
number of actual long term references
Definition: h264.h:500
void ff_copy_picture(Picture *dst, Picture *src)
Definition: mpegvideo.c:223
Picture.
Definition: mpegvideo.h:94
#define CONFIG_GRAY
Definition: config.h:276
qpel_mc_func avg_2tap_qpel_pixels_tab[4][16]
Definition: dsputil.h:328
void(* pred4x4_add[2])(uint8_t *pix, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:100
int cabac_init_idc
Definition: h264.h:503
static void implicit_weight_table(H264Context *h, int field)
Initialize implicit_weight table.
Definition: h264.c:2066
int size_in_bits
Definition: get_bits.h:55
SPS sps
current sps
Definition: h264.h:329
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:454
static av_always_inline void prefetch_motion(H264Context *h, int list, int pixel_shift, int chroma_idc)
Definition: h264.c:731
#define CONFIG_SMALL
Definition: config.h:315
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2058
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:401
#define MB_FIELD
Definition: h264.h:63
#define MAX_SPS_COUNT
Definition: h264.h:42
void ff_er_frame_end(MpegEncContext *s)
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:490
void ff_vdpau_h264_picture_start(MpegEncContext *s)
Definition: vdpau.c:130
int emu_edge_height
Definition: h264.h:326
Context Adaptive Binary Arithmetic Coder inline functions.
int level
level
Definition: avcodec.h:2885
int init_qp
pic_init_qp_minus26 + 26
Definition: h264.h:216
int frame_num
h264 frame_num (raw frame_num from slice header)
Definition: mpegvideo.h:131
int mmco_reset
Definition: h264.h:498
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:282
uint8_t * bipred_scratchpad
Definition: h264.h:580
MotionEstContext me
Definition: mpegvideo.h:405
#define ER_AC_ERROR
Definition: mpegvideo.h:495
int poc_lsb
Definition: h264.h:461
int max_pic_num
max_frame_num or 2 * max_frame_num for field pics.
Definition: h264.h:480
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1474
static const uint8_t field_scan[16]
Definition: h264data.h:62
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
Definition: h264_refs.c:491
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
Definition: mpegvideo.c:245
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264.c:3657
unsigned int topright_samples_available
Definition: h264.h:287
#define AV_WN16A(p, v)
Definition: intreadwrite.h:454
const uint8_t * zigzag_scan8x8_q0
Definition: h264.h:419
int curr_pic_num
frame_num for frames or 2 * frame_num + 1 for field pics.
Definition: h264.h:475
int slice_type
Definition: h264.h:343
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264.c:2256
Definition: h264.h:105
static int av_unused get_cabac_terminate(CABACContext *c)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: mpegvideo.h:87
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1370
int top_type
Definition: h264.h:275
#define HAVE_THREADS
Definition: config.h:235
static void loop_filter(H264Context *h, int start_x, int end_x)
Definition: h264.c:3541
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:570
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]
Definition: h264.h:336
void(* h264_idct8_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:88
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:45
#define PART_NOT_AVAILABLE
Definition: h264.h:305
unsigned int list_count
Definition: h264.h:376
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2733
#define IS_16X8(a)
Definition: mpegvideo.h:116
void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: vdpau.c:110
GetBitContext intra_gb
Definition: h264.h:384
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:663
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:456
int pic_order_present
pic_order_present_flag
Definition: h264.h:210
static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
Definition: dsputil_sh4.c:73
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:453
void(* h264_idct8_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:84
struct H264Context * thread_context[MAX_THREADS]
Definition: h264.h:509
int chroma_log2_weight_denom
Definition: h264.h:358
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
Definition: avcodec.h:1180
static void flush_change(H264Context *h)
Definition: h264.c:2141
short offset_for_ref_frame[256]
Definition: h264.h:183
int timing_info_present_flag
Definition: h264.h:179
NULL
Definition: eval.c:52
static void decode_finish_row(H264Context *h)
Draw edges and report progress for the last MB row.
Definition: h264.c:3625
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:527
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:288
external API header
H264 / AVC / MPEG4 part10 codec data table
int ff_h264_frame_start(H264Context *h)
Definition: h264.c:1287
MpegEncContext s
Definition: h264.h:255
Definition: h264.h:109
void ff_thread_await_progress(AVFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread.c:684
int slice_alpha_c0_offset
Definition: h264.h:433
1: top field
Definition: h264.h:134
enum AVCodecID codec_id
Definition: avcodec.h:1350
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:96
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:432
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:470
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
Definition: avcodec.h:1008
static void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height, int y_offset, int list0, int list1, int *nrefs)
Definition: h264.c:308
int next_outputed_poc
Definition: h264.h:491
#define LTOP
Definition: h264.h:67
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:164
int poc_msb
Definition: h264.h:462
int debug
debug
Definition: avcodec.h:2568
int implicit_weight[48][48][2]
Definition: h264.h:362
int max_contexts
Max number of threads / contexts.
Definition: h264.h:522
main external API structure.
Definition: avcodec.h:1339
static void(WINAPI *cond_broadcast)(pthread_cond_t *cond)
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:326
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:80
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:1359
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:865
2: bottom field
Definition: h264.h:135
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:215
#define QP_MAX_NUM
Definition: h264.h:98
static enum AVPixelFormat hwaccel_pixfmt_list_h264_jpeg_420[]
Definition: h264.c:62
int16_t(*[2] motion_val)[2]
motion vector table
Definition: avcodec.h:1172
static void init_dequant8_coeff_table(H264Context *h)
Definition: h264.c:795
qpel_mc_func put_2tap_qpel_pixels_tab[4][16]
Definition: dsputil.h:327
Picture * picture
main picture buffer
Definition: mpegvideo.h:255
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:566
int extradata_size
Definition: avcodec.h:1455
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:103
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:200
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:268
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:536
Picture * short_ref[32]
Definition: h264.h:484
void(* h264_idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:86
Switching Predicted.
Definition: avutil.h:250
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1580
int slice_beta_offset
Definition: h264.h:434
#define ER_MB_END
Definition: mpegvideo.h:503
const uint8_t * field_scan8x8_cavlc_q0
Definition: h264.h:423
void ff_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2391
int index
Definition: gxfenc.c:72
uint32_t(*[6] dequant8_coeff)[64]
Definition: h264.h:339
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:4269
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264.h:260
void av_log_missing_feature(void *avc, const char *feature, int want_sample)
Log a generic warning message about a missing feature.
Definition: utils.c:2005
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2072
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2065
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:372
int context_initialized
Definition: mpegvideo.h:242
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:257
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, int first_slice)
Definition: h264_refs.c:686
void ff_er_frame_start(MpegEncContext *s)
static void await_references(H264Context *h)
Wait until all reference frames are available for MC operations.
Definition: h264.c:352
AVHWAccel * ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
Definition: utils.c:2046
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
Definition: h264dsp.h:34
int8_t * ref_index[2]
motion reference frame index the order in which these are stored can depend on the codec...
Definition: avcodec.h:1195
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:361
#define s1
Definition: regdef.h:38
unsigned int sps_id
Definition: h264.h:208
#define CABAC
Definition: h264.h:85
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:154
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:139
short DCTELEM
Definition: dsputil.h:39
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:94
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:132
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:349
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:316
uint32_t time_scale
Definition: h264.h:181
int field_poc[2]
h264 top/bottom POC
Definition: mpegvideo.h:129
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264.h:222
static int pred_weight_table(H264Context *h)
Definition: h264.c:1998
int pic_struct_present_flag
Definition: h264.h:191
Definition: h264.h:103
uint8_t zigzag_scan[16]
Definition: h264.h:412
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4256
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
Definition: dsputil.h:324
#define FIELD_OR_MBAFF_PICTURE
Definition: h264.h:82
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: avcodec.h:997
int8_t * qscale_table
QP table.
Definition: avcodec.h:1139
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:106
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:199
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
#define LBOT
Definition: h264.h:68
Definition: vf_drawbox.c:36
void(* clear_blocks)(DCTELEM *blocks)
Definition: dsputil.h:219
#define AV_ZERO128(d)
Definition: intreadwrite.h:542
struct MpegEncContext * owner2
pointer to the MpegEncContext that allocated this picture
Definition: mpegvideo.h:148
int height
Definition: gxfenc.c:72
MpegEncContext.
Definition: mpegvideo.h:211
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:316
struct AVCodecContext * avctx
Definition: mpegvideo.h:213
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:2131
static int field_end(H264Context *h, int in_setup)
Definition: h264.c:2290
hardware decoding through VDA
Definition: pixfmt.h:153
discard all non reference
Definition: avcodec.h:533
int is_complex
Definition: h264.h:429
int mb_height
pic_height_in_map_units_minus1 + 1
Definition: h264.h:162
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
Definition: dsputil.h:325
uint8_t * rbsp_buffer[2]
Definition: h264.h:441
static const uint8_t dequant4_coeff_init[6][3]
Definition: h264data.h:250
#define tprintf(p,...)
Definition: get_bits.h:613
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:248
#define AV_COPY128(d, s)
Definition: intreadwrite.h:514
#define FIELD_PICTURE
Definition: h264.h:65
#define MAX_SLICES
Definition: dxva2_mpeg2.c:25
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1772
uint16_t * slice_table_base
Definition: h264.h:458
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:152
H.264 / AVC / MPEG4 part10 motion vector predicion.
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:295
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:315
Bi-dir predicted.
Definition: avutil.h:247
AVProfile.
Definition: avcodec.h:2948
static int execute_decode_slices(H264Context *h, int context_count)
Call decode_slice() for each context.
Definition: h264.c:3827
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:79
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2517
int cur_chroma_format_idc
Definition: h264.h:579
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2893
int den
denominator
Definition: rational.h:45
int chroma_qp[2]
Definition: h264.h:258
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:551
uint16_t sub_mb_type[4]
Definition: h264.h:352
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:197
DSP utils.
int intra16x16_pred_mode
Definition: h264.h:267
void * priv_data
Definition: avcodec.h:1382
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:467
#define IS_SUB_4X8(a)
Definition: mpegvideo.h:121
int picture_structure
Definition: mpegvideo.h:637
Definition: h264.h:104
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:124
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264.c:1573
Definition: h264.h:107
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2773
VideoDSPContext vdsp
Definition: mpegvideo.h:362
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: avcodec.h:1239
#define IS_SUB_8X8(a)
Definition: mpegvideo.h:119
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:4172
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:48
#define IS_DIRECT(a)
Definition: mpegvideo.h:113
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1141
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
Print debugging info for the given picture.
Definition: mpegvideo.c:1735
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:91
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:295
int ff_h264_decode_extradata(H264Context *h)
Definition: h264.c:989
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:289
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:506
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1390
void ff_init_cabac_states(CABACContext *c)
Definition: cabac.c:139
static int fill_filter_caches(H264Context *h, int mb_type)
Definition: h264.c:3395
void(* add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size)
Definition: dsputil.h:207
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:301
int key_frame
1 -> keyframe, 0-> not
Definition: avcodec.h:1058
#define AV_ZERO32(d)
Definition: intreadwrite.h:534
int linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:253
int current_slice
current slice number, used to initalize slice_num of each thread/context
Definition: h264.h:514
int mb_width
pic_width_in_mbs_minus1 + 1
Definition: h264.h:161
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1441
#define IS_16X16(a)
Definition: mpegvideo.h:115
#define AV_RN16A(p)
Definition: intreadwrite.h:442
uint32_t * mb2b_xy
Definition: h264.h:318
int slice_type_fixed
Definition: h264.h:345
struct AVFrame f
Definition: mpegvideo.h:95
int delta_poc_bottom
Definition: h264.h:463
const uint8_t * field_scan_q0
Definition: h264.h:421
static void free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:758
int ff_h264_fill_default_ref_list(H264Context *h)
Fill the default_ref_list.
Definition: h264_refs.c:108
H264DSPContext h264dsp
Definition: h264.h:256
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:230
uint8_t field_scan8x8[64]
Definition: h264.h:416
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:236
#define copy_fields(to, from, start_field, end_field)
Definition: h264.c:1142
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:573
static av_always_inline void mc_part_std(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:575
int8_t * intra4x4_pred_mode
Definition: h264.h:283
static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth, int index, int value)
Definition: h264.c:1768
void ff_thread_report_progress(AVFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:666
int mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264.h:322
static int clone_slice(H264Context *dst, H264Context *src)
Replicate H264 "master" context to thread contexts.
Definition: h264.c:2351
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3119
8: frame tripling
Definition: h264.h:141
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264.h:432
#define AV_RN64A(p)
Definition: intreadwrite.h:450
#define LIST_NOT_USED
Definition: h264.h:304
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3130
uint8_t(* non_zero_count)[48]
Definition: h264.h:297
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
Definition: h264.h:170
exp golomb vlc stuff
int slice_num
Definition: h264.h:341
int uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:254
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:898
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:570
static const uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264data.h:96
int level_idc
Definition: h264.h:149
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2547
void(* pred8x8l[9+3])(uint8_t *src, int topleft, int topright, ptrdiff_t stride)
Definition: h264pred.h:95
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:439
void(* pred16x16_add[3])(uint8_t *pix, const int *block_offset, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:107
uint8_t field_scan[16]
Definition: h264.h:415
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:69
int b_stride
Definition: h264.h:320
Predicted.
Definition: avutil.h:246
unsigned int rbsp_buffer_size[2]
Definition: h264.h:442
#define CHROMA444
Definition: h264.h:89
Context Adaptive Binary Arithmetic Coder.
void ff_MPV_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:718
int8_t ref_cache[2][5 *8]
Definition: h264.h:303
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:350
int short_ref_count
number of actual short term references
Definition: h264.h:501
static const AVProfile profiles[]
Definition: h264.c:4283
if(!(ptr_align%ac->ptr_align)&&samples_align >=aligned_len)
enum AVColorSpace colorspace
Definition: h264.h:178