Libav
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of Libav.
11  *
12  * Libav is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * Libav is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with Libav; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "rectangle.h"
32 #include "thread.h"
33 #include "vp8.h"
34 #include "vp8data.h"
35 #include "libavutil/avassert.h"
36 
37 #if ARCH_ARM
38 # include "arm/vp8.h"
39 #endif
40 
41 static void free_buffers(VP8Context *s)
42 {
43  int i;
44  if (s->thread_data)
45  for (i = 0; i < MAX_THREADS; i++) {
46 #if HAVE_THREADS
47  pthread_cond_destroy(&s->thread_data[i].cond);
49 #endif
51  }
52  av_freep(&s->thread_data);
55  av_freep(&s->top_nnz);
56  av_freep(&s->top_border);
57 
58  s->macroblocks = NULL;
59 }
60 
61 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
62 {
63  int ret;
64  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
65  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
66  return ret;
67  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
69  return AVERROR(ENOMEM);
70  }
71  return 0;
72 }
73 
75 {
78 }
79 
#if CONFIG_VP8_DECODER
/**
 * Point dst at the same frame data as src.
 *
 * Takes a new thread-frame reference and a new reference on the segment
 * map buffer (when src has one). On failure dst is released again so it
 * holds no dangling references.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
{
    int err;

    /* Drop whatever dst currently references before re-pointing it. */
    vp8_release_frame(s, dst);

    err = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (err < 0)
        return err;

    if (src->seg_map) {
        dst->seg_map = av_buffer_ref(src->seg_map);
        if (!dst->seg_map) {
            /* Undo the frame reference taken above so dst is left clean. */
            vp8_release_frame(s, dst);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}
#endif /* CONFIG_VP8_DECODER */
98 
99 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
100 {
101  VP8Context *s = avctx->priv_data;
102  int i;
103 
104  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
105  vp8_release_frame(s, &s->frames[i]);
106  memset(s->framep, 0, sizeof(s->framep));
107 
108  if (free_mem)
109  free_buffers(s);
110 }
111 
112 static void vp8_decode_flush(AVCodecContext *avctx)
113 {
114  vp8_decode_flush_impl(avctx, 0);
115 }
116 
118 {
119  VP8Frame *frame = NULL;
120  int i;
121 
122  // find a free buffer
123  for (i = 0; i < 5; i++)
124  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
125  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
126  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
127  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
128  frame = &s->frames[i];
129  break;
130  }
131  if (i == 5) {
132  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
133  abort();
134  }
135  if (frame->tf.f->data[0])
136  vp8_release_frame(s, frame);
137 
138  return frame;
139 }
140 
141 static av_always_inline
142 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
143 {
144  AVCodecContext *avctx = s->avctx;
145  int i, ret;
146 
147  if (width != s->avctx->width ||
148  height != s->avctx->height) {
150 
151  ret = ff_set_dimensions(s->avctx, width, height);
152  if (ret < 0)
153  return ret;
154  }
155 
156  s->mb_width = (s->avctx->coded_width + 15) / 16;
157  s->mb_height = (s->avctx->coded_height + 15) / 16;
158 
159  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
160  avctx->thread_count > 1;
161  if (!s->mb_layout) { // Frame threading and one thread
162  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
163  sizeof(*s->macroblocks));
165  } else // Sliced threading
166  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
167  sizeof(*s->macroblocks));
168  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
169  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
171 
172  for (i = 0; i < MAX_THREADS; i++) {
174  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
175 #if HAVE_THREADS
176  pthread_mutex_init(&s->thread_data[i].lock, NULL);
177  pthread_cond_init(&s->thread_data[i].cond, NULL);
178 #endif
179  }
180 
181  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
182  (!s->intra4x4_pred_mode_top && !s->mb_layout))
183  return AVERROR(ENOMEM);
184 
185  s->macroblocks = s->macroblocks_base + 1;
186 
187  return 0;
188 }
189 
191 {
192  return update_dimensions(s, width, height, IS_VP7);
193 }
194 
196 {
197  return update_dimensions(s, width, height, IS_VP8);
198 }
199 
201 {
202  VP56RangeCoder *c = &s->c;
203  int i;
204 
206 
207  if (vp8_rac_get(c)) { // update segment feature data
209 
210  for (i = 0; i < 4; i++)
212 
213  for (i = 0; i < 4; i++)
215  }
216  if (s->segmentation.update_map)
217  for (i = 0; i < 3; i++)
218  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
219 }
220 
222 {
223  VP56RangeCoder *c = &s->c;
224  int i;
225 
226  for (i = 0; i < 4; i++) {
227  if (vp8_rac_get(c)) {
228  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
229 
230  if (vp8_rac_get(c))
231  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
232  }
233  }
234 
235  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
236  if (vp8_rac_get(c)) {
237  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
238 
239  if (vp8_rac_get(c))
240  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
241  }
242  }
243 }
244 
245 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
246 {
247  const uint8_t *sizes = buf;
248  int i;
249 
250  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
251 
252  buf += 3 * (s->num_coeff_partitions - 1);
253  buf_size -= 3 * (s->num_coeff_partitions - 1);
254  if (buf_size < 0)
255  return -1;
256 
257  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
258  int size = AV_RL24(sizes + 3 * i);
259  if (buf_size - size < 0)
260  return -1;
261 
262  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
263  buf += size;
264  buf_size -= size;
265  }
266  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
267 
268  return 0;
269 }
270 
271 static void vp7_get_quants(VP8Context *s)
272 {
273  VP56RangeCoder *c = &s->c;
274 
275  int yac_qi = vp8_rac_get_uint(c, 7);
276  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
277  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
278  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
279  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
280  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
281 
282  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
283  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
284  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
285  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
286  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
287  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
288 }
289 
290 static void get_quants(VP8Context *s)
291 {
292  VP56RangeCoder *c = &s->c;
293  int i, base_qi;
294 
295  int yac_qi = vp8_rac_get_uint(c, 7);
296  int ydc_delta = vp8_rac_get_sint(c, 4);
297  int y2dc_delta = vp8_rac_get_sint(c, 4);
298  int y2ac_delta = vp8_rac_get_sint(c, 4);
299  int uvdc_delta = vp8_rac_get_sint(c, 4);
300  int uvac_delta = vp8_rac_get_sint(c, 4);
301 
302  for (i = 0; i < 4; i++) {
303  if (s->segmentation.enabled) {
304  base_qi = s->segmentation.base_quant[i];
305  if (!s->segmentation.absolute_vals)
306  base_qi += yac_qi;
307  } else
308  base_qi = yac_qi;
309 
310  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
311  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
312  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
313  /* 101581>>16 is equivalent to 155/100 */
314  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
315  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
316  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
317 
318  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
319  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
320  }
321 }
322 
336 static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
337 {
338  VP56RangeCoder *c = &s->c;
339 
340  if (update)
341  return VP56_FRAME_CURRENT;
342 
343  switch (vp8_rac_get_uint(c, 2)) {
344  case 1:
345  return VP56_FRAME_PREVIOUS;
346  case 2:
348  }
349  return VP56_FRAME_NONE;
350 }
351 
353 {
354  int i, j;
355  for (i = 0; i < 4; i++)
356  for (j = 0; j < 16; j++)
357  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
358  sizeof(s->prob->token[i][j]));
359 }
360 
362 {
363  VP56RangeCoder *c = &s->c;
364  int i, j, k, l, m;
365 
366  for (i = 0; i < 4; i++)
367  for (j = 0; j < 8; j++)
368  for (k = 0; k < 3; k++)
369  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
371  int prob = vp8_rac_get_uint(c, 8);
372  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
373  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
374  }
375 }
376 
377 #define VP7_MVC_SIZE 17
378 #define VP8_MVC_SIZE 19
379 
381  int mvc_size)
382 {
383  VP56RangeCoder *c = &s->c;
384  int i, j;
385 
386  if (vp8_rac_get(c))
387  for (i = 0; i < 4; i++)
388  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
389  if (vp8_rac_get(c))
390  for (i = 0; i < 3; i++)
391  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
392 
393  // 17.2 MV probability update
394  for (i = 0; i < 2; i++)
395  for (j = 0; j < mvc_size; j++)
397  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
398 }
399 
400 static void update_refs(VP8Context *s)
401 {
402  VP56RangeCoder *c = &s->c;
403 
404  int update_golden = vp8_rac_get(c);
405  int update_altref = vp8_rac_get(c);
406 
407  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
408  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
409 }
410 
411 static void copy_luma(AVFrame *dst, AVFrame *src, int width, int height)
412 {
413  int i, j;
414 
415  for (j = 1; j < 3; j++) {
416  for (i = 0; i < height / 2; i++)
417  memcpy(dst->data[j] + i * dst->linesize[j],
418  src->data[j] + i * src->linesize[j], width / 2);
419  }
420 }
421 
/**
 * Apply a VP7 fade to a plane: dst[p] = clip(src[p] + src[p]*beta/256 + alpha).
 *
 * dst and src may point at the same plane (each pixel is read before it is
 * written).
 */
static void fade(uint8_t *dst, uint8_t *src,
                 int width, int height, int linesize,
                 int alpha, int beta)
{
    int x, y;

    for (y = 0; y < height; y++) {
        uint8_t *drow = dst + y * linesize;
        const uint8_t *srow = src + y * linesize;

        for (x = 0; x < width; x++) {
            int px = srow[x];
            drow[x] = av_clip_uint8(px + ((px * beta) >> 8) + alpha);
        }
    }
}
435 
437 {
438  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
439  int beta = (int8_t) vp8_rac_get_uint(c, 8);
440  int ret;
441 
442  if (!s->keyframe && (alpha || beta)) {
443  int width = s->mb_width * 16;
444  int height = s->mb_height * 16;
445  AVFrame *src, *dst;
446 
447  if (!s->framep[VP56_FRAME_PREVIOUS])
448  return AVERROR_INVALIDDATA;
449 
450  dst =
451  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
452 
453  /* preserve the golden frame, write a new previous frame */
456  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
457  return ret;
458 
459  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
460 
461  copy_luma(dst, src, width, height);
462  }
463 
464  fade(dst->data[0], src->data[0],
465  width, height, dst->linesize[0], alpha, beta);
466  }
467 
468  return 0;
469 }
470 
471 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
472 {
473  VP56RangeCoder *c = &s->c;
474  int part1_size, hscale, vscale, i, j, ret;
475  int width = s->avctx->width;
476  int height = s->avctx->height;
477 
478  if (buf_size < 4) {
479  return AVERROR_INVALIDDATA;
480  }
481 
482  s->profile = (buf[0] >> 1) & 7;
483  if (s->profile > 1) {
484  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
485  return AVERROR_INVALIDDATA;
486  }
487 
488  s->keyframe = !(buf[0] & 1);
489  s->invisible = 0;
490  part1_size = AV_RL24(buf) >> 4;
491 
492  buf += 4 - s->profile;
493  buf_size -= 4 - s->profile;
494 
495  if (buf_size < part1_size) {
496  return AVERROR_INVALIDDATA;
497  }
498 
499  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
500 
501  ff_vp56_init_range_decoder(c, buf, part1_size);
502  buf += part1_size;
503  buf_size -= part1_size;
504 
505  /* A. Dimension information (keyframes only) */
506  if (s->keyframe) {
507  width = vp8_rac_get_uint(c, 12);
508  height = vp8_rac_get_uint(c, 12);
509  hscale = vp8_rac_get_uint(c, 2);
510  vscale = vp8_rac_get_uint(c, 2);
511  if (hscale || vscale)
512  avpriv_request_sample(s->avctx, "Upscaling");
513 
517  sizeof(s->prob->pred16x16));
519  sizeof(s->prob->pred8x8c));
520  for (i = 0; i < 2; i++)
521  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
522  sizeof(vp7_mv_default_prob[i]));
523  memset(&s->segmentation, 0, sizeof(s->segmentation));
524  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
525  memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
526  }
527 
528  if (s->keyframe || s->profile > 0)
529  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
530 
531  /* B. Decoding information for all four macroblock-level features */
532  for (i = 0; i < 4; i++) {
533  s->feature_enabled[i] = vp8_rac_get(c);
534  if (s->feature_enabled[i]) {
536 
537  for (j = 0; j < 3; j++)
538  s->feature_index_prob[i][j] =
539  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
540 
541  if (vp7_feature_value_size[s->profile][i])
542  for (j = 0; j < 4; j++)
543  s->feature_value[i][j] =
545  }
546  }
547 
548  s->segmentation.enabled = 0;
549  s->segmentation.update_map = 0;
550  s->lf_delta.enabled = 0;
551 
552  s->num_coeff_partitions = 1;
553  ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
554 
555  if (!s->macroblocks_base || /* first frame */
556  width != s->avctx->width || height != s->avctx->height ||
557  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
558  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
559  return ret;
560  }
561 
562  /* C. Dequantization indices */
563  vp7_get_quants(s);
564 
565  /* D. Golden frame update flag (a Flag) for interframes only */
566  if (!s->keyframe) {
569  }
570 
571  s->update_last = 1;
572  s->update_probabilities = 1;
573  s->fade_present = 1;
574 
575  if (s->profile > 0) {
577  if (!s->update_probabilities)
578  s->prob[1] = s->prob[0];
579 
580  if (!s->keyframe)
581  s->fade_present = vp8_rac_get(c);
582  }
583 
584  /* E. Fading information for previous frame */
585  if (s->fade_present && vp8_rac_get(c)) {
586  if ((ret = vp7_fade_frame(s ,c)) < 0)
587  return ret;
588  }
589 
590  /* F. Loop filter type */
591  if (!s->profile)
592  s->filter.simple = vp8_rac_get(c);
593 
594  /* G. DCT coefficient ordering specification */
595  if (vp8_rac_get(c))
596  for (i = 1; i < 16; i++)
597  s->prob[0].scan[i] = zigzag_scan[vp8_rac_get_uint(c, 4)];
598 
599  /* H. Loop filter levels */
600  if (s->profile > 0)
601  s->filter.simple = vp8_rac_get(c);
602  s->filter.level = vp8_rac_get_uint(c, 6);
603  s->filter.sharpness = vp8_rac_get_uint(c, 3);
604 
605  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
607 
608  s->mbskip_enabled = 0;
609 
610  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
611  if (!s->keyframe) {
612  s->prob->intra = vp8_rac_get_uint(c, 8);
613  s->prob->last = vp8_rac_get_uint(c, 8);
615  }
616 
617  return 0;
618 }
619 
620 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
621 {
622  VP56RangeCoder *c = &s->c;
623  int header_size, hscale, vscale, ret;
624  int width = s->avctx->width;
625  int height = s->avctx->height;
626 
627  s->keyframe = !(buf[0] & 1);
628  s->profile = (buf[0]>>1) & 7;
629  s->invisible = !(buf[0] & 0x10);
630  header_size = AV_RL24(buf) >> 5;
631  buf += 3;
632  buf_size -= 3;
633 
634  if (s->profile > 3)
635  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
636 
637  if (!s->profile)
639  sizeof(s->put_pixels_tab));
640  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
642  sizeof(s->put_pixels_tab));
643 
644  if (header_size > buf_size - 7 * s->keyframe) {
645  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
646  return AVERROR_INVALIDDATA;
647  }
648 
649  if (s->keyframe) {
650  if (AV_RL24(buf) != 0x2a019d) {
652  "Invalid start code 0x%x\n", AV_RL24(buf));
653  return AVERROR_INVALIDDATA;
654  }
655  width = AV_RL16(buf + 3) & 0x3fff;
656  height = AV_RL16(buf + 5) & 0x3fff;
657  hscale = buf[4] >> 6;
658  vscale = buf[6] >> 6;
659  buf += 7;
660  buf_size -= 7;
661 
662  if (hscale || vscale)
663  avpriv_request_sample(s->avctx, "Upscaling");
664 
668  sizeof(s->prob->pred16x16));
670  sizeof(s->prob->pred8x8c));
671  memcpy(s->prob->mvc, vp8_mv_default_prob,
672  sizeof(s->prob->mvc));
673  memset(&s->segmentation, 0, sizeof(s->segmentation));
674  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
675  }
676 
677  ff_vp56_init_range_decoder(c, buf, header_size);
678  buf += header_size;
679  buf_size -= header_size;
680 
681  if (s->keyframe) {
682  if (vp8_rac_get(c))
683  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
684  vp8_rac_get(c); // whether we can skip clamping in dsp functions
685  }
686 
687  if ((s->segmentation.enabled = vp8_rac_get(c)))
689  else
690  s->segmentation.update_map = 0; // FIXME: move this to some init function?
691 
692  s->filter.simple = vp8_rac_get(c);
693  s->filter.level = vp8_rac_get_uint(c, 6);
694  s->filter.sharpness = vp8_rac_get_uint(c, 3);
695 
696  if ((s->lf_delta.enabled = vp8_rac_get(c)))
697  if (vp8_rac_get(c))
698  update_lf_deltas(s);
699 
700  if (setup_partitions(s, buf, buf_size)) {
701  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
702  return AVERROR_INVALIDDATA;
703  }
704 
705  if (!s->macroblocks_base || /* first frame */
706  width != s->avctx->width || height != s->avctx->height)
707  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
708  return ret;
709 
710  get_quants(s);
711 
712  if (!s->keyframe) {
713  update_refs(s);
715  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
716  }
717 
718  // if we aren't saving this frame's probabilities for future frames,
719  // make a copy of the current probabilities
720  if (!(s->update_probabilities = vp8_rac_get(c)))
721  s->prob[1] = s->prob[0];
722 
723  s->update_last = s->keyframe || vp8_rac_get(c);
724 
726 
727  if ((s->mbskip_enabled = vp8_rac_get(c)))
728  s->prob->mbskip = vp8_rac_get_uint(c, 8);
729 
730  if (!s->keyframe) {
731  s->prob->intra = vp8_rac_get_uint(c, 8);
732  s->prob->last = vp8_rac_get_uint(c, 8);
733  s->prob->golden = vp8_rac_get_uint(c, 8);
735  }
736 
737  return 0;
738 }
739 
740 static av_always_inline
741 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
742 {
743  dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
744  dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
745 }
746 
750 static int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
751 {
752  int bit, x = 0;
753 
754  if (vp56_rac_get_prob_branchy(c, p[0])) {
755  int i;
756 
757  for (i = 0; i < 3; i++)
758  x += vp56_rac_get_prob(c, p[9 + i]) << i;
759  for (i = (vp7 ? 7 : 9); i > 3; i--)
760  x += vp56_rac_get_prob(c, p[9 + i]) << i;
761  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
762  x += 8;
763  } else {
764  // small_mvtree
765  const uint8_t *ps = p + 2;
766  bit = vp56_rac_get_prob(c, *ps);
767  ps += 1 + 3 * bit;
768  x += 4 * bit;
769  bit = vp56_rac_get_prob(c, *ps);
770  ps += 1 + bit;
771  x += 2 * bit;
772  x += vp56_rac_get_prob(c, *ps);
773  }
774 
775  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
776 }
777 
778 static av_always_inline
779 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
780 {
781  if (is_vp7)
782  return vp7_submv_prob;
783 
784  if (left == top)
785  return vp8_submv_prob[4 - !!left];
786  if (!top)
787  return vp8_submv_prob[2];
788  return vp8_submv_prob[1 - !!left];
789 }
790 
795 static av_always_inline
797  int layout, int is_vp7)
798 {
799  int part_idx;
800  int n, num;
801  VP8Macroblock *top_mb;
802  VP8Macroblock *left_mb = &mb[-1];
803  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
804  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
805  VP56mv *top_mv;
806  VP56mv *left_mv = left_mb->bmv;
807  VP56mv *cur_mv = mb->bmv;
808 
809  if (!layout) // layout is inlined, s->mb_layout is not
810  top_mb = &mb[2];
811  else
812  top_mb = &mb[-s->mb_width - 1];
813  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
814  top_mv = top_mb->bmv;
815 
819  else
820  part_idx = VP8_SPLITMVMODE_8x8;
821  } else {
822  part_idx = VP8_SPLITMVMODE_4x4;
823  }
824 
825  num = vp8_mbsplit_count[part_idx];
826  mbsplits_cur = vp8_mbsplits[part_idx],
827  firstidx = vp8_mbfirstidx[part_idx];
828  mb->partitioning = part_idx;
829 
830  for (n = 0; n < num; n++) {
831  int k = firstidx[n];
832  uint32_t left, above;
833  const uint8_t *submv_prob;
834 
835  if (!(k & 3))
836  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
837  else
838  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
839  if (k <= 3)
840  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
841  else
842  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
843 
844  submv_prob = get_submv_prob(left, above, is_vp7);
845 
846  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
847  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
848  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
849  mb->bmv[n].y = mb->mv.y +
850  read_mv_component(c, s->prob->mvc[0], is_vp7);
851  mb->bmv[n].x = mb->mv.x +
852  read_mv_component(c, s->prob->mvc[1], is_vp7);
853  } else {
854  AV_ZERO32(&mb->bmv[n]);
855  }
856  } else {
857  AV_WN32A(&mb->bmv[n], above);
858  }
859  } else {
860  AV_WN32A(&mb->bmv[n], left);
861  }
862  }
863 
864  return num;
865 }
866 
/**
 * Resolve a candidate MV-prediction neighbour (mb_x + xoffset,
 * mb_y + yoffset) on the virtual macroblock grid, which includes one
 * padding column at the right edge (hence width mb_width + 1).
 *
 * @param boundary lowest linear position considered valid
 * @param edge_x   receives the neighbour's column on success
 * @param edge_y   receives the neighbour's row on success
 * @return 1 when the offset is usable, 0 when it falls before the
 *         boundary or lands on the padding column
 */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    int vwidth = mb_width + 1;
    int pos    = (mb_y + yoffset) * vwidth + mb_x + xoffset;

    if (pos < boundary || pos % vwidth == vwidth - 1)
        return 0;

    *edge_y = pos / vwidth;
    *edge_x = pos % vwidth;
    return 1;
}
890 
891 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
892 {
893  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
894 }
895 
896 static av_always_inline
898  int mb_x, int mb_y, int layout)
899 {
900  VP8Macroblock *mb_edge[12];
901  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
902  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
903  int idx = CNT_ZERO;
904  VP56mv near_mv[3];
905  uint8_t cnt[3] = { 0 };
906  VP56RangeCoder *c = &s->c;
907  int i;
908 
909  AV_ZERO32(&near_mv[0]);
910  AV_ZERO32(&near_mv[1]);
911  AV_ZERO32(&near_mv[2]);
912 
913  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
914  const VP7MVPred * pred = &vp7_mv_pred[i];
915  int edge_x, edge_y;
916 
917  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
918  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
919  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
920  ? s->macroblocks_base + 1 + edge_x +
921  (s->mb_width + 1) * (edge_y + 1)
922  : s->macroblocks + edge_x +
923  (s->mb_height - edge_y - 1) * 2;
924  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
925  if (mv) {
926  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
927  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
928  idx = CNT_NEAREST;
929  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
930  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
931  continue;
932  idx = CNT_NEAR;
933  } else {
934  AV_WN32A(&near_mv[CNT_NEAR], mv);
935  idx = CNT_NEAR;
936  }
937  } else {
938  AV_WN32A(&near_mv[CNT_NEAREST], mv);
939  idx = CNT_NEAREST;
940  }
941  } else {
942  idx = CNT_ZERO;
943  }
944  } else {
945  idx = CNT_ZERO;
946  }
947  cnt[idx] += vp7_mv_pred[i].score;
948  }
949 
951 
952  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
953  mb->mode = VP8_MVMODE_MV;
954 
955  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
956 
957  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
958 
959  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
960  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
961  else
962  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
963 
964  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
965  mb->mode = VP8_MVMODE_SPLIT;
966  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
967  } else {
968  mb->mv.y += read_mv_component(c, s->prob->mvc[0], IS_VP7);
969  mb->mv.x += read_mv_component(c, s->prob->mvc[1], IS_VP7);
970  mb->bmv[0] = mb->mv;
971  }
972  } else {
973  mb->mv = near_mv[CNT_NEAR];
974  mb->bmv[0] = mb->mv;
975  }
976  } else {
977  mb->mv = near_mv[CNT_NEAREST];
978  mb->bmv[0] = mb->mv;
979  }
980  } else {
981  mb->mode = VP8_MVMODE_ZERO;
982  AV_ZERO32(&mb->mv);
983  mb->bmv[0] = mb->mv;
984  }
985 }
986 
987 static av_always_inline
989  int mb_x, int mb_y, int layout)
990 {
991  VP8Macroblock *mb_edge[3] = { 0 /* top */,
992  mb - 1 /* left */,
993  0 /* top-left */ };
994  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
995  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
996  int idx = CNT_ZERO;
997  int cur_sign_bias = s->sign_bias[mb->ref_frame];
998  int8_t *sign_bias = s->sign_bias;
999  VP56mv near_mv[4];
1000  uint8_t cnt[4] = { 0 };
1001  VP56RangeCoder *c = &s->c;
1002 
1003  if (!layout) { // layout is inlined (s->mb_layout is not)
1004  mb_edge[0] = mb + 2;
1005  mb_edge[2] = mb + 1;
1006  } else {
1007  mb_edge[0] = mb - s->mb_width - 1;
1008  mb_edge[2] = mb - s->mb_width - 2;
1009  }
1010 
1011  AV_ZERO32(&near_mv[0]);
1012  AV_ZERO32(&near_mv[1]);
1013  AV_ZERO32(&near_mv[2]);
1014 
1015  /* Process MB on top, left and top-left */
1016 #define MV_EDGE_CHECK(n) \
1017  { \
1018  VP8Macroblock *edge = mb_edge[n]; \
1019  int edge_ref = edge->ref_frame; \
1020  if (edge_ref != VP56_FRAME_CURRENT) { \
1021  uint32_t mv = AV_RN32A(&edge->mv); \
1022  if (mv) { \
1023  if (cur_sign_bias != sign_bias[edge_ref]) { \
1024  /* SWAR negate of the values in mv. */ \
1025  mv = ~mv; \
1026  mv = ((mv & 0x7fff7fff) + \
1027  0x00010001) ^ (mv & 0x80008000); \
1028  } \
1029  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1030  AV_WN32A(&near_mv[++idx], mv); \
1031  cnt[idx] += 1 + (n != 2); \
1032  } else \
1033  cnt[CNT_ZERO] += 1 + (n != 2); \
1034  } \
1035  }
1036 
1037  MV_EDGE_CHECK(0)
1038  MV_EDGE_CHECK(1)
1039  MV_EDGE_CHECK(2)
1040 
1042  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1043  mb->mode = VP8_MVMODE_MV;
1044 
1045  /* If we have three distinct MVs, merge first and last if they're the same */
1046  if (cnt[CNT_SPLITMV] &&
1047  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1048  cnt[CNT_NEAREST] += 1;
1049 
1050  /* Swap near and nearest if necessary */
1051  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1052  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1053  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1054  }
1055 
1056  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1057  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1058  /* Choose the best mv out of 0,0 and the nearest mv */
1059  clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1060  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1061  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1062  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1063 
1064  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1065  mb->mode = VP8_MVMODE_SPLIT;
1066  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1067  } else {
1068  mb->mv.y += read_mv_component(c, s->prob->mvc[0], IS_VP8);
1069  mb->mv.x += read_mv_component(c, s->prob->mvc[1], IS_VP8);
1070  mb->bmv[0] = mb->mv;
1071  }
1072  } else {
1073  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
1074  mb->bmv[0] = mb->mv;
1075  }
1076  } else {
1077  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
1078  mb->bmv[0] = mb->mv;
1079  }
1080  } else {
1081  mb->mode = VP8_MVMODE_ZERO;
1082  AV_ZERO32(&mb->mv);
1083  mb->bmv[0] = mb->mv;
1084  }
1085 }
1086 
1087 static av_always_inline
1089  int mb_x, int keyframe, int layout)
1090 {
1091  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1092 
1093  if (layout == 1) {
1094  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1095  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1096  }
1097  if (keyframe) {
1098  int x, y;
1099  uint8_t *top;
1100  uint8_t *const left = s->intra4x4_pred_mode_left;
1101  if (layout == 1)
1102  top = mb->intra4x4_pred_mode_top;
1103  else
1104  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1105  for (y = 0; y < 4; y++) {
1106  for (x = 0; x < 4; x++) {
1107  const uint8_t *ctx;
1108  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1109  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1110  left[y] = top[x] = *intra4x4;
1111  intra4x4++;
1112  }
1113  }
1114  } else {
1115  int i;
1116  for (i = 0; i < 16; i++)
1117  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1119  }
1120 }
1121 
 /* Decode the per-macroblock header: optional VP7 feature flags, the
  * segment id, the skip flag, then either intra prediction modes
  * (keyframe / intra MB) or a reference frame plus motion vectors
  * (inter MB). *segment is also written back so it can seed the next
  * frame's map. */
1122 static av_always_inline
1123 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1124  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1125 {
1126  VP56RangeCoder *c = &s->c;
1127  const char *vp7_feature_name[] = { "q-index",
1128  "lf-delta",
1129  "partial-golden-update",
1130  "blit-pitch" };
1131  if (is_vp7) {
 /* VP7: signal which of the four per-MB features are present. */
1132  int i;
1133  *segment = 0;
1134  for (i = 0; i < 4; i++) {
1135  if (s->feature_enabled[i]) {
1136  if (vp56_rac_get_prob(c, s->feature_present_prob[i])) {
1138  s->feature_index_prob[i]);
1140  "Feature %s present in macroblock (value 0x%x)\n",
1141  vp7_feature_name[i], s->feature_value[i][index]);
1142  }
1143  }
1144  }
1145  } else if (s->segmentation.update_map)
1146  *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
1147  else if (s->segmentation.enabled)
 /* no map update: reuse the previous frame's segment id if available */
1148  *segment = ref ? *ref : *segment;
1149  mb->segment = *segment;
1150 
1151  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1152 
1153  if (s->keyframe) {
1156 
1157  if (mb->mode == MODE_I4x4) {
1158  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1159  } else {
 /* whole-MB mode: replicate the 4x4 mode into all four top/left slots */
1160  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1161  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1162  if (s->mb_layout == 1)
1163  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1164  else
1165  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1166  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1167  }
1168 
1172  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1173  // inter MB, 16.2
1174  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1175  mb->ref_frame =
1176  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1178  else
 /* track reference usage so prefetch_motion() can skip cold refs */
1180  s->ref_count[mb->ref_frame - 1]++;
1181 
1182  // motion vectors, 16.3
1183  if (is_vp7)
1184  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1185  else
1186  vp8_decode_mvs(s, mb, mb_x, mb_y, layout);
1187  } else {
1188  // intra MB, 16.1
1190 
1191  if (mb->mode == MODE_I4x4)
1192  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1193 
1195  s->prob->pred8x8c);
 /* intra MBs carry no motion; clear the first block mv */
1198  AV_ZERO32(&mb->bmv[0]);
1199  }
1200 }
1201 
 /* Decode the DCT coefficient token stream for one 4x4 block.
  * Works on a local copy of the range coder (written back at the end),
  * dequantizes with qmul[0] for the DC (i == 0) and qmul[1] for ACs,
  * and stores coefficients through the scan[] order. Returns the index
  * one past the last decoded coefficient.
  * VP7 re-checks EOB after a zero run (goto restart); VP8 does not. */
1212 static av_always_inline
1214  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1215  int i, uint8_t *token_prob, int16_t qmul[2],
1216  const uint8_t scan[16], int vp7)
1217 {
1218  VP56RangeCoder c = *r;
1219  goto skip_eob;
1220  do {
1221  int coeff;
1222 restart:
1223  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1224  break;
1225 
1226 skip_eob:
1227  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1228  if (++i == 16)
1229  break; // invalid input; blocks should end with EOB
1230  token_prob = probs[i][0];
1231  if (vp7)
1232  goto restart;
1233  goto skip_eob;
1234  }
1235 
1236  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1237  coeff = 1;
1238  token_prob = probs[i + 1][1];
1239  } else {
1240  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1241  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1242  if (coeff)
1243  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1244  coeff += 2;
1245  } else {
1246  // DCT_CAT*
1247  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1248  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1249  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1250  } else { // DCT_CAT2
1251  coeff = 7;
1252  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1253  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1254  }
1255  } else { // DCT_CAT3 and up
 /* category base value is 3 + 8 << cat; extra bits follow */
1256  int a = vp56_rac_get_prob(&c, token_prob[8]);
1257  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1258  int cat = (a << 1) + b;
1259  coeff = 3 + (8 << cat);
1260  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1261  }
1262  }
1263  token_prob = probs[i + 1][2];
1264  }
 /* sign bit, then dequantize: qmul[0] for DC, qmul[1] otherwise */
1265  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1266  } while (++i < 16);
1267 
1268  *r = c;
1269  return i;
1270 }
1271 
1272 static av_always_inline
1273 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1274 {
1275  int16_t dc = block[0];
1276  int ret = 0;
1277 
1278  if (pred[1] > 3) {
1279  dc += pred[0];
1280  ret = 1;
1281  }
1282 
1283  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1284  block[0] = pred[0] = dc;
1285  pred[1] = 0;
1286  } else {
1287  if (pred[0] == dc)
1288  pred[1]++;
1289  block[0] = pred[0] = dc;
1290  }
1291 
1292  return ret;
1293 }
1294 
 /* VP7 entry point: forwards to decode_block_coeffs_internal with an
  * explicit scan order and is_vp7 = 1. */
1296  int16_t block[16],
1297  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1298  int i, uint8_t *token_prob,
1299  int16_t qmul[2],
1300  const uint8_t scan[16])
1301 {
1302  return decode_block_coeffs_internal(r, block, probs, i,
1303  token_prob, qmul, scan, IS_VP7);
1304 }
1305 
 /* VP8 entry point: fixed zigzag scan, is_vp7 = 0. Guarded by #ifndef so
  * an arch-specific override (see arm/vp8.h include above) can replace it. */
1306 #ifndef vp8_decode_block_coeffs_internal
1308  int16_t block[16],
1309  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1310  int i, uint8_t *token_prob,
1311  int16_t qmul[2])
1312 {
1313  return decode_block_coeffs_internal(r, block, probs, i,
1314  token_prob, qmul, zigzag_scan, IS_VP8);
1315 }
1316 #endif
1317 
 /* Decode one block's coefficients: first check the cheap DCT_EOB case
  * (empty block, returns 0), otherwise dispatch to the VP7 or VP8 decoder.
  * zero_nhood selects the token probability context from the neighbours'
  * non-zero counts. */
1330 static av_always_inline
1332  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1333  int i, int zero_nhood, int16_t qmul[2],
1334  const uint8_t scan[16], int vp7)
1335 {
1336  uint8_t *token_prob = probs[i][zero_nhood];
1337  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1338  return 0;
1339  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1340  token_prob, qmul, scan)
1341  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1342  token_prob, qmul);
1343 }
1344 
 /* Decode all residual coefficients of one macroblock: the separate DC
  * (WHT) plane when the mode has one, then 16 luma and 2x4 chroma 4x4
  * blocks. Updates the top/left non-zero contexts (t_nnz/l_nnz) and the
  * per-block non_zero_count_cache, and forces mb->skip when nothing was
  * coded so IDCT and the inner loop filter are skipped. */
1345 static av_always_inline
1347  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1348  int is_vp7)
1349 {
1350  int i, x, y, luma_start = 0, luma_ctx = 3;
1351  int nnz_pred, nnz, nnz_total = 0;
1352  int segment = mb->segment;
1353  int block_dc = 0;
1354 
 /* modes with a whole-MB DC plane (not I4x4, and for VP8 not SPLIT) */
1355  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1356  nnz_pred = t_nnz[8] + l_nnz[8];
1357 
1358  // decode DC values and do hadamard
1359  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1360  nnz_pred, s->qmat[segment].luma_dc_qmul,
1361  zigzag_scan, is_vp7);
1362  l_nnz[8] = t_nnz[8] = !!nnz;
1363 
1364  if (is_vp7 && mb->mode > MODE_I4x4) {
 /* VP7 only: DC prediction from the previous inter MB of this ref */
1365  nnz |= inter_predict_dc(td->block_dc,
1366  s->inter_dc_pred[mb->ref_frame - 1]);
1367  }
1368 
1369  if (nnz) {
1370  nnz_total += nnz;
1371  block_dc = 1;
1372  if (nnz == 1)
1373  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1374  else
1375  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1376  }
 /* luma ACs start at coefficient 1 and use token context 0 */
1377  luma_start = 1;
1378  luma_ctx = 0;
1379  }
1380 
1381  // luma blocks
1382  for (y = 0; y < 4; y++)
1383  for (x = 0; x < 4; x++) {
1384  nnz_pred = l_nnz[y] + t_nnz[x];
1385  nnz = decode_block_coeffs(c, td->block[y][x],
1386  s->prob->token[luma_ctx],
1387  luma_start, nnz_pred,
1388  s->qmat[segment].luma_qmul,
1389  s->prob[0].scan, is_vp7);
1390  /* nnz+block_dc may be one more than the actual last index,
1391  * but we don't care */
1392  td->non_zero_count_cache[y][x] = nnz + block_dc;
1393  t_nnz[x] = l_nnz[y] = !!nnz;
1394  nnz_total += nnz;
1395  }
1396 
1397  // chroma blocks
1398  // TODO: what to do about dimensions? 2nd dim for luma is x,
1399  // but for chroma it's (y<<1)|x
1400  for (i = 4; i < 6; i++)
1401  for (y = 0; y < 2; y++)
1402  for (x = 0; x < 2; x++) {
1403  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1404  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1405  s->prob->token[2], 0, nnz_pred,
1406  s->qmat[segment].chroma_qmul,
1407  s->prob[0].scan, is_vp7);
1408  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1409  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1410  nnz_total += nnz;
1411  }
1412 
1413  // if there were no coded coeffs despite the macroblock not being marked skip,
1414  // we MUST not do the inner loop filter and should not do IDCT
1415  // Since skip isn't used for bitstream prediction, just manually set it.
1416  if (!nnz_total)
1417  mb->skip = 1;
1418 }
1419 
1420 static av_always_inline
1421 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1422  uint8_t *src_cb, uint8_t *src_cr,
1423  int linesize, int uvlinesize, int simple)
1424 {
1425  AV_COPY128(top_border, src_y + 15 * linesize);
1426  if (!simple) {
1427  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1428  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1429  }
1430 }
1431 
 /* Exchange (xchg=1) or copy (xchg=0) 8-byte chunks between the saved
  * top-border buffer and the pixel row directly above the current
  * macroblock. Called once before intra prediction (xchg=1) and once
  * after (xchg=0); on the first row this initializes the top edge. */
1432 static av_always_inline
1433 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1434  uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
1435  int mb_y, int mb_width, int simple, int xchg)
1436 {
1437  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
 /* step up one row: all accesses below are to the row above the MB */
1438  src_y -= linesize;
1439  src_cb -= uvlinesize;
1440  src_cr -= uvlinesize;
1441 
1442 #define XCHG(a, b, xchg) \
1443  do { \
1444  if (xchg) \
1445  AV_SWAP64(b, a); \
1446  else \
1447  AV_COPY64(b, a); \
1448  } while (0)
1449 
1450  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1451  XCHG(top_border, src_y, xchg);
 /* the right half and the top-right neighbour are always copied */
1452  XCHG(top_border + 8, src_y + 8, 1);
1453  if (mb_x < mb_width - 1)
1454  XCHG(top_border + 32, src_y + 16, 1);
1455 
1456  // only copy chroma for normal loop filter
1457  // or to initialize the top row to 127
1458  if (!simple || !mb_y) {
1459  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1460  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1461  XCHG(top_border + 16, src_cb, 1);
1462  XCHG(top_border + 24, src_cr, 1);
1463  }
1464 }
1465 
1466 static av_always_inline
1467 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1468 {
1469  if (!mb_x)
1470  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1471  else
1472  return mb_y ? mode : LEFT_DC_PRED8x8;
1473 }
1474 
1475 static av_always_inline
1476 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1477 {
1478  if (!mb_x)
1479  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1480  else
1481  return mb_y ? mode : HOR_PRED8x8;
1482 }
1483 
1484 static av_always_inline
1485 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1486 {
1487  switch (mode) {
1488  case DC_PRED8x8:
1489  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1490  case VERT_PRED8x8:
1491  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1492  case HOR_PRED8x8:
1493  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1494  case PLANE_PRED8x8: /* TM */
1495  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1496  }
1497  return mode;
1498 }
1499 
1500 static av_always_inline
1501 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1502 {
1503  if (!mb_x) {
1504  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1505  } else {
1506  return mb_y ? mode : HOR_VP8_PRED;
1507  }
1508 }
1509 
1510 static av_always_inline
1511 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1512  int *copy_buf, int vp7)
1513 {
1514  switch (mode) {
1515  case VERT_PRED:
1516  if (!mb_x && mb_y) {
1517  *copy_buf = 1;
1518  return mode;
1519  }
1520  /* fall-through */
1521  case DIAG_DOWN_LEFT_PRED:
1522  case VERT_LEFT_PRED:
1523  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1524  case HOR_PRED:
1525  if (!mb_y) {
1526  *copy_buf = 1;
1527  return mode;
1528  }
1529  /* fall-through */
1530  case HOR_UP_PRED:
1531  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1532  case TM_VP8_PRED:
1533  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1534  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1535  * as 16x16/8x8 DC */
1536  case DIAG_DOWN_RIGHT_PRED:
1537  case VERT_RIGHT_PRED:
1538  case HOR_DOWN_PRED:
1539  if (!mb_y || !mb_x)
1540  *copy_buf = 1;
1541  return mode;
1542  }
1543  return mode;
1544 }
1545 
 /* Perform intra prediction for one macroblock, then add the residual
  * IDCT for the 4x4 path. Either a single 16x16 prediction, or sixteen
  * 4x4 predictions with per-block edge fixups; edge blocks are predicted
  * into an aligned stack buffer (copy_dst) padded with the proper border
  * pixels and copied back. Finishes with 8x8 chroma prediction. */
1546 static av_always_inline
1548  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1549 {
1550  int x, y, mode, nnz;
1551  uint32_t tr;
1552 
1553  /* for the first row, we need to run xchg_mb_border to init the top edge
1554  * to 127 otherwise, skip it if we aren't going to deblock */
1555  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1556  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1557  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1558  s->filter.simple, 1);
1559 
1560  if (mb->mode < MODE_I4x4) {
1561  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1562  s->hpc.pred16x16[mode](dst[0], s->linesize);
1563  } else {
1564  uint8_t *ptr = dst[0];
1565  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
 /* border fill values differ: VP7 uses 128, VP8 uses 127/129 */
1566  const uint8_t lo = is_vp7 ? 128 : 127;
1567  const uint8_t hi = is_vp7 ? 128 : 129;
1568  uint8_t tr_top[4] = { lo, lo, lo, lo };
1569 
1570  // all blocks on the right edge of the macroblock use bottom edge
1571  // the top macroblock for their topright edge
1572  uint8_t *tr_right = ptr - s->linesize + 16;
1573 
1574  // if we're on the right edge of the frame, said edge is extended
1575  // from the top macroblock
1576  if (mb_y && mb_x == s->mb_width - 1) {
1577  tr = tr_right[-1] * 0x01010101u;
1578  tr_right = (uint8_t *) &tr;
1579  }
1580 
1581  if (mb->skip)
1583 
1584  for (y = 0; y < 4; y++) {
1585  uint8_t *topright = ptr + 4 - s->linesize;
1586  for (x = 0; x < 4; x++) {
1587  int copy = 0, linesize = s->linesize;
1588  uint8_t *dst = ptr + 4 * x;
1589  DECLARE_ALIGNED(4, uint8_t, copy_dst)[5 * 8];
1590 
1591  if ((y == 0 || x == 3) && mb_y == 0) {
1592  topright = tr_top;
1593  } else if (x == 3)
1594  topright = tr_right;
1595 
1596  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1597  mb_y + y, &copy, is_vp7);
1598  if (copy) {
 /* predict into the stack buffer; rows are 8 bytes apart */
1599  dst = copy_dst + 12;
1600  linesize = 8;
1601  if (!(mb_y + y)) {
1602  copy_dst[3] = lo;
1603  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1604  } else {
1605  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1606  if (!(mb_x + x)) {
1607  copy_dst[3] = hi;
1608  } else {
1609  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1610  }
1611  }
1612  if (!(mb_x + x)) {
1613  copy_dst[11] =
1614  copy_dst[19] =
1615  copy_dst[27] =
1616  copy_dst[35] = hi;
1617  } else {
1618  copy_dst[11] = ptr[4 * x - 1];
1619  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1620  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1621  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1622  }
1623  }
1624  s->hpc.pred4x4[mode](dst, topright, linesize);
1625  if (copy) {
 /* copy the predicted 4x4 block back into the frame */
1626  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1627  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1628  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1629  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1630  }
1631 
1632  nnz = td->non_zero_count_cache[y][x];
1633  if (nnz) {
1634  if (nnz == 1)
1635  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1636  td->block[y][x], s->linesize);
1637  else
1638  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1639  td->block[y][x], s->linesize);
1640  }
1641  topright += 4;
1642  }
1643 
1644  ptr += 4 * s->linesize;
1645  intra4x4 += 4;
1646  }
1647  }
1648 
1650  mb_x, mb_y, is_vp7);
1651  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1652  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1653 
1654  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1655  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1656  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1657  s->filter.simple, 0);
1658 }
1659 
 /* Per-subpel-phase edge requirements, indexed by (mv & 7).
  * Row 0 doubles as the index into the mc_func[3][3] tables. */
1660 static const uint8_t subpel_idx[3][8] = {
1661  { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
1662  // also function pointer index
1663  { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
1664  { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
1665 };
1666 
 /* Luma motion compensation for one block. The mv is in quarter-pel
  * units; the subpel phase selects the MC function and, via subpel_idx,
  * the number of extra border pixels the filter needs. When the
  * referenced area (plus filter margin) falls outside the frame, the
  * source is rebuilt in td->edge_emu_buffer. Waits on the reference
  * frame's decoding progress before reading from it. */
1683 static av_always_inline
1685  ThreadFrame *ref, const VP56mv *mv,
1686  int x_off, int y_off, int block_w, int block_h,
1687  int width, int height, ptrdiff_t linesize,
1688  vp8_mc_func mc_func[3][3])
1689 {
1690  uint8_t *src = ref->f->data[0];
1691 
1692  if (AV_RN32A(mv)) {
1693  int src_linesize = linesize;
1694 
1695  int mx = (mv->x << 1) & 7, mx_idx = subpel_idx[0][mx];
1696  int my = (mv->y << 1) & 7, my_idx = subpel_idx[0][my];
1697 
1698  x_off += mv->x >> 2;
1699  y_off += mv->y >> 2;
1700 
1701  // edge emulation
1702  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1703  src += y_off * linesize + x_off;
1704  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1705  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1707  src - my_idx * linesize - mx_idx,
1708  EDGE_EMU_LINESIZE, linesize,
1709  block_w + subpel_idx[1][mx],
1710  block_h + subpel_idx[1][my],
1711  x_off - mx_idx, y_off - my_idx,
1712  width, height);
1713  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1714  src_linesize = EDGE_EMU_LINESIZE;
1715  }
1716  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1717  } else {
 /* zero mv: plain copy, no filter margin needed */
1718  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1719  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1720  linesize, block_h, 0, 0);
1721  }
1722 }
1723 
 /* Chroma motion compensation: same scheme as vp8_mc_luma but in
  * eighth-pel units and operating on both chroma planes at once (they
  * share offsets and subpel phase, so edge emulation is done per plane
  * into the shared edge_emu_buffer). */
1741 static av_always_inline
1743  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1744  int x_off, int y_off, int block_w, int block_h,
1745  int width, int height, ptrdiff_t linesize,
1746  vp8_mc_func mc_func[3][3])
1747 {
1748  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1749 
1750  if (AV_RN32A(mv)) {
1751  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1752  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1753 
1754  x_off += mv->x >> 3;
1755  y_off += mv->y >> 3;
1756 
1757  // edge emulation
1758  src1 += y_off * linesize + x_off;
1759  src2 += y_off * linesize + x_off;
1760  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1761  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1762  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1764  src1 - my_idx * linesize - mx_idx,
1765  EDGE_EMU_LINESIZE, linesize,
1766  block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1767  x_off - mx_idx, y_off - my_idx, width, height);
1768  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1769  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1770 
1772  src2 - my_idx * linesize - mx_idx,
1773  EDGE_EMU_LINESIZE, linesize,
1774  block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1775  x_off - mx_idx, y_off - my_idx, width, height);
1776  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1777  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1778  } else {
1779  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1780  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1781  }
1782  } else {
 /* zero mv: plain copy of both planes */
1783  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1784  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1785  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1786  }
1787 }
1788 
 /* Motion-compensate one rectangular partition of a macroblock: luma with
  * the given mv, then chroma at half resolution with the same mv (rounded
  * to full-pel for VP8 profile 3). bx/by_off position the partition
  * inside the macroblock. */
1789 static av_always_inline
1791  ThreadFrame *ref_frame, int x_off, int y_off,
1792  int bx_off, int by_off, int block_w, int block_h,
1793  int width, int height, VP56mv *mv)
1794 {
1795  VP56mv uvmv = *mv;
1796 
1797  /* Y */
1798  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1799  ref_frame, mv, x_off + bx_off, y_off + by_off,
1800  block_w, block_h, width, height, s->linesize,
1801  s->put_pixels_tab[block_w == 8]);
1802 
1803  /* U/V */
1804  if (s->profile == 3) {
1805  /* this block only applies VP8; it is safe to check
1806  * only the profile, as VP7 profile <= 1 */
1807  uvmv.x &= ~7;
1808  uvmv.y &= ~7;
1809  }
 /* halve all geometry for the chroma planes */
1810  x_off >>= 1;
1811  y_off >>= 1;
1812  bx_off >>= 1;
1813  by_off >>= 1;
1814  width >>= 1;
1815  height >>= 1;
1816  block_w >>= 1;
1817  block_h >>= 1;
1818  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1819  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1820  &uvmv, x_off + bx_off, y_off + by_off,
1821  block_w, block_h, width, height, s->uvlinesize,
1822  s->put_pixels_tab[1 + (block_w == 4)]);
1823 }
1824 
1825 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1826  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1827 static av_always_inline
1828 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1829  int mb_xy, int ref)
1830 {
1831  /* Don't prefetch refs that haven't been used very often this frame. */
1832  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1833  int x_off = mb_x << 4, y_off = mb_y << 4;
1834  int mx = (mb->mv.x >> 2) + x_off + 8;
1835  int my = (mb->mv.y >> 2) + y_off;
1836  uint8_t **src = s->framep[ref]->tf.f->data;
1837  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1838  /* For threading, a ff_thread_await_progress here might be useful, but
1839  * it actually slows down the decoder. Since a bad prefetch doesn't
1840  * generate bad decoder output, we don't run it here. */
1841  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1842  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1843  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1844  }
1845 }
1846 
 /* Apply motion compensation for a whole macroblock according to its
  * partitioning: one 16x16 call, two 16x8/8x16 halves, four 8x8
  * quadrants, or sixteen 4x4 blocks (whose chroma mvs are the rounded
  * average of the four covering luma mvs). */
1850 static av_always_inline
1852  VP8Macroblock *mb, int mb_x, int mb_y)
1853 {
1854  int x_off = mb_x << 4, y_off = mb_y << 4;
1855  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1856  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1857  VP56mv *bmv = mb->bmv;
1858 
1859  switch (mb->partitioning) {
1860  case VP8_SPLITMVMODE_NONE:
1861  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1862  0, 0, 16, 16, width, height, &mb->mv);
1863  break;
1864  case VP8_SPLITMVMODE_4x4: {
1865  int x, y;
1866  VP56mv uvmv;
1867 
1868  /* Y */
1869  for (y = 0; y < 4; y++) {
1870  for (x = 0; x < 4; x++) {
1871  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1872  ref, &bmv[4 * y + x],
1873  4 * x + x_off, 4 * y + y_off, 4, 4,
1874  width, height, s->linesize,
1875  s->put_pixels_tab[2]);
1876  }
1877  }
1878 
1879  /* U/V */
1880  x_off >>= 1;
1881  y_off >>= 1;
1882  width >>= 1;
1883  height >>= 1;
1884  for (y = 0; y < 2; y++) {
1885  for (x = 0; x < 2; x++) {
 /* chroma mv = rounded average of the 4 covering luma mvs */
1886  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1887  mb->bmv[2 * y * 4 + 2 * x + 1].x +
1888  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1889  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1890  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1891  mb->bmv[2 * y * 4 + 2 * x + 1].y +
1892  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1893  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
 /* round toward zero via the sign bit */
1894  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
1895  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
1896  if (s->profile == 3) {
1897  uvmv.x &= ~7;
1898  uvmv.y &= ~7;
1899  }
1900  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
1901  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
1902  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1903  width, height, s->uvlinesize,
1904  s->put_pixels_tab[2]);
1905  }
1906  }
1907  break;
1908  }
1909  case VP8_SPLITMVMODE_16x8:
1910  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1911  0, 0, 16, 8, width, height, &bmv[0]);
1912  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1913  0, 8, 16, 8, width, height, &bmv[1]);
1914  break;
1915  case VP8_SPLITMVMODE_8x16:
1916  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1917  0, 0, 8, 16, width, height, &bmv[0]);
1918  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1919  8, 0, 8, 16, width, height, &bmv[1]);
1920  break;
1921  case VP8_SPLITMVMODE_8x8:
1922  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1923  0, 0, 8, 8, width, height, &bmv[0]);
1924  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1925  8, 0, 8, 8, width, height, &bmv[1]);
1926  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1927  0, 8, 8, 8, width, height, &bmv[2]);
1928  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1929  8, 8, 8, 8, width, height, &bmv[3]);
1930  break;
1931  }
1932 }
1933 
 /* Add the inverse-transformed residuals of a macroblock to the
  * prediction. non_zero_count_cache is read 4 bytes at a time so that
  * whole all-DC rows can take the dc_add4y/dc_add4uv fast path; within
  * the slow path, count == 1 means DC-only for that block. */
1934 static av_always_inline
1936 {
1937  int x, y, ch;
1938 
 /* luma: skipped for I4x4, whose residuals were added in intra_predict */
1939  if (mb->mode != MODE_I4x4) {
1940  uint8_t *y_dst = dst[0];
1941  for (y = 0; y < 4; y++) {
1942  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
1943  if (nnz4) {
1944  if (nnz4 & ~0x01010101) {
1945  for (x = 0; x < 4; x++) {
1946  if ((uint8_t) nnz4 == 1)
1947  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
1948  td->block[y][x],
1949  s->linesize);
1950  else if ((uint8_t) nnz4 > 1)
1951  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
1952  td->block[y][x],
1953  s->linesize);
1954  nnz4 >>= 8;
1955  if (!nnz4)
1956  break;
1957  }
1958  } else {
 /* every block in this row is DC-only (or empty) */
1959  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
1960  }
1961  }
1962  y_dst += 4 * s->linesize;
1963  }
1964  }
1965 
1966  for (ch = 0; ch < 2; ch++) {
1967  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
1968  if (nnz4) {
1969  uint8_t *ch_dst = dst[1 + ch];
1970  if (nnz4 & ~0x01010101) {
1971  for (y = 0; y < 2; y++) {
1972  for (x = 0; x < 2; x++) {
1973  if ((uint8_t) nnz4 == 1)
1974  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
1975  td->block[4 + ch][(y << 1) + x],
1976  s->uvlinesize);
1977  else if ((uint8_t) nnz4 > 1)
1978  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
1979  td->block[4 + ch][(y << 1) + x],
1980  s->uvlinesize);
1981  nnz4 >>= 8;
1982  if (!nnz4)
1983  goto chroma_idct_end;
1984  }
1985  ch_dst += 4 * s->uvlinesize;
1986  }
1987  } else {
1988  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
1989  }
1990  }
1991 chroma_idct_end:
1992  ;
1993  }
1994 }
1995 
 /* Compute the loop filter strength for one macroblock and store it in
  * *f: base level from the segment (absolute or delta), per-ref and
  * per-mode deltas, clamp to [0,63], then derive the interior limit from
  * the sharpness setting. inner_filter decides whether block-internal
  * edges are filtered at all. */
1996 static av_always_inline
1998  VP8FilterStrength *f, int is_vp7)
1999 {
2000  int interior_limit, filter_level;
2001 
2002  if (s->segmentation.enabled) {
2003  filter_level = s->segmentation.filter_level[mb->segment];
2004  if (!s->segmentation.absolute_vals)
2005  filter_level += s->filter.level;
2006  } else
2007  filter_level = s->filter.level;
2008 
2009  if (s->lf_delta.enabled) {
2010  filter_level += s->lf_delta.ref[mb->ref_frame];
2011  filter_level += s->lf_delta.mode[mb->mode];
2012  }
2013 
 /* clamp to the 6-bit range [0, 63] */
2014  filter_level = av_clip_uintp2(filter_level, 6);
2015 
2016  interior_limit = filter_level;
2017  if (s->filter.sharpness) {
2018  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2019  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2020  }
2021  interior_limit = FFMAX(interior_limit, 1);
2022 
2023  f->filter_level = filter_level;
2024  f->inner_limit = interior_limit;
 /* skipped inter MBs without coefficients have no inner edges to filter
  * (VP8 only; VP7 always filters inner edges) */
2025  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2026  mb->mode == VP8_MVMODE_SPLIT;
2027 }
2028 
 /* Apply the normal loop filter to one macroblock: macroblock-edge
  * filters on the left/top boundaries (when a neighbour exists) and
  * inner-edge filters at 4-pixel offsets. VP7 and VP8 differ in their
  * edge limits and in whether the horizontal inner filters run before
  * or after the vertical ones (H_LOOP_FILTER_16Y_INNER cond). */
2029 static av_always_inline
2031  int mb_x, int mb_y, int is_vp7)
2032 {
2033  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2034  int filter_level = f->filter_level;
2035  int inner_limit = f->inner_limit;
2036  int inner_filter = f->inner_filter;
2037  int linesize = s->linesize;
2038  int uvlinesize = s->uvlinesize;
 /* high-edge-variance threshold by [keyframe][filter_level] */
2039  static const uint8_t hev_thresh_lut[2][64] = {
2040  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2041  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2042  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2043  3, 3, 3, 3 },
2044  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2045  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2046  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2047  2, 2, 2, 2 }
2048  };
2049 
2050  if (!filter_level)
2051  return;
2052 
2053  if (is_vp7) {
2054  bedge_lim_y = filter_level;
2055  bedge_lim_uv = filter_level * 2;
2056  mbedge_lim = filter_level + 2;
2057  } else {
2058  bedge_lim_y =
2059  bedge_lim_uv = filter_level * 2 + inner_limit;
2060  mbedge_lim = bedge_lim_y + 4;
2061  }
2062 
2063  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2064 
 /* left macroblock edge */
2065  if (mb_x) {
2066  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2067  mbedge_lim, inner_limit, hev_thresh);
2068  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2069  mbedge_lim, inner_limit, hev_thresh);
2070  }
2071 
2072 #define H_LOOP_FILTER_16Y_INNER(cond) \
2073  if (cond && inner_filter) { \
2074  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2075  bedge_lim_y, inner_limit, \
2076  hev_thresh); \
2077  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2078  bedge_lim_y, inner_limit, \
2079  hev_thresh); \
2080  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2081  bedge_lim_y, inner_limit, \
2082  hev_thresh); \
2083  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2084  uvlinesize, bedge_lim_uv, \
2085  inner_limit, hev_thresh); \
2086  }
2087 
 /* VP8 runs horizontal inner filters here, VP7 at the end */
2088  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2089 
 /* top macroblock edge */
2090  if (mb_y) {
2091  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2092  mbedge_lim, inner_limit, hev_thresh);
2093  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2094  mbedge_lim, inner_limit, hev_thresh);
2095  }
2096 
2097  if (inner_filter) {
2098  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2099  linesize, bedge_lim_y,
2100  inner_limit, hev_thresh);
2101  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2102  linesize, bedge_lim_y,
2103  inner_limit, hev_thresh);
2104  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2105  linesize, bedge_lim_y,
2106  inner_limit, hev_thresh);
2107  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2108  dst[2] + 4 * uvlinesize,
2109  uvlinesize, bedge_lim_uv,
2110  inner_limit, hev_thresh);
2111  }
2112 
2113  H_LOOP_FILTER_16Y_INNER(is_vp7)
2114 }
2115 
 /* Apply the simple loop filter to one macroblock: luma only, using the
  * _simple DSP routines on the left/top macroblock edges and the three
  * inner edges in each direction. */
2116 static av_always_inline
2118  int mb_x, int mb_y)
2119 {
2120  int mbedge_lim, bedge_lim;
2121  int filter_level = f->filter_level;
2122  int inner_limit = f->inner_limit;
2123  int inner_filter = f->inner_filter;
2124  int linesize = s->linesize;
2125 
2126  if (!filter_level)
2127  return;
2128 
2129  bedge_lim = 2 * filter_level + inner_limit;
2130  mbedge_lim = bedge_lim + 4;
2131 
 /* left macroblock edge, then vertical inner edges */
2132  if (mb_x)
2133  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2134  if (inner_filter) {
2135  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2136  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2137  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2138  }
2139 
 /* top macroblock edge, then horizontal inner edges */
2140  if (mb_y)
2141  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2142  if (inner_filter) {
2143  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2144  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2145  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2146  }
2147 }
2148 
 /* Allowed motion-vector overshoot past the frame edge, in the 1/8-pel
  * units used by mv_min/mv_max (16 pixels). */
2149 #define MARGIN (16 << 2)
 /* Pre-pass over the whole frame decoding every macroblock's mode and
  * motion vectors (layout = 1) before any pixel decoding starts, writing
  * segment ids into curframe->seg_map. mv_min/mv_max are slid by 64
  * (one MB in 1/8 pel) per macroblock to clamp mvs to the frame. */
2150 static av_always_inline
2152  VP8Frame *prev_frame, int is_vp7)
2153 {
2154  VP8Context *s = avctx->priv_data;
2155  int mb_x, mb_y;
2156 
2157  s->mv_min.y = -MARGIN;
2158  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2159  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2160  VP8Macroblock *mb = s->macroblocks_base +
2161  ((s->mb_width + 1) * (mb_y + 1) + 1);
2162  int mb_xy = mb_y * s->mb_width;
2163 
 /* reset left intra4x4 context to DC at the start of each row */
2164  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2165 
2166  s->mv_min.x = -MARGIN;
2167  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2168  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2169  if (mb_y == 0)
 /* seed the (virtual) row above with DC modes */
2170  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2171  DC_PRED * 0x01010101);
2172  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2173  prev_frame && prev_frame->seg_map ?
2174  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
2175  s->mv_min.x -= 64;
2176  s->mv_max.x -= 64;
2177  }
2178  s->mv_min.y -= 64;
2179  s->mv_max.y -= 64;
2180  }
2181 }
2182 
 /* VP7 wrapper: lets the always-inlined worker specialize on is_vp7. */
2183 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2184  VP8Frame *prev_frame)
2185 {
2186  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2187 }
2188 
 /* VP8 wrapper: lets the always-inlined worker specialize on is_vp7. */
2189 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2190  VP8Frame *prev_frame)
2191 {
2192  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2193 }
2194 
2195 #if HAVE_THREADS
 /* Block until thread `otd` has decoded at least up to macroblock
  * (mb_y_check, mb_x_check). Positions are packed as (mb_y << 16) | mb_x
  * so a single integer compare orders them; wait_mb_pos is published so
  * the other thread knows someone is waiting. */
2196 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2197  do { \
2198  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2199  if (otd->thread_mb_pos < tmp) { \
2200  pthread_mutex_lock(&otd->lock); \
2201  td->wait_mb_pos = tmp; \
2202  do { \
2203  if (otd->thread_mb_pos >= tmp) \
2204  break; \
2205  pthread_cond_wait(&otd->cond, &otd->lock); \
2206  } while (1); \
2207  td->wait_mb_pos = INT_MAX; \
2208  pthread_mutex_unlock(&otd->lock); \
2209  } \
2210  } while (0);
2211 
 /* Publish this thread's decoding progress and wake any neighbour thread
  * that is waiting for a position we have now passed. Only signals under
  * sliced threading with more than one job; relies on `avctx`, `num_jobs`,
  * `prev_td` and `next_td` being in scope at the expansion site. */
2212 #define update_pos(td, mb_y, mb_x) \
2213  do { \
2214  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2215  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2216  (num_jobs > 1); \
2217  int is_null = !next_td || !prev_td; \
2218  int pos_check = (is_null) ? 1 \
2219  : (next_td != td && \
2220  pos >= next_td->wait_mb_pos) || \
2221  (prev_td != td && \
2222  pos >= prev_td->wait_mb_pos); \
2223  td->thread_mb_pos = pos; \
2224  if (sliced_threading && pos_check) { \
2225  pthread_mutex_lock(&td->lock); \
2226  pthread_cond_broadcast(&td->cond); \
2227  pthread_mutex_unlock(&td->lock); \
2228  } \
2229  } while (0);
2230 #else
 /* single-threaded build: no cross-thread synchronization needed */
2231 #define check_thread_pos(td, otd, mb_x_check, mb_y_check)
2232 #define update_pos(td, mb_y, mb_x)
2233 #endif
2234 
/**
 * Decode one row of macroblocks (mode/coefficient decode, intra/inter
 * prediction, IDCT) without applying the loop filter.  The row index is
 * taken from td->thread_mb_pos (upper 16 bits); under slice threading the
 * function synchronizes with the jobs handling the rows above and below
 * through check_thread_pos()/update_pos().
 */
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
                                        int jobnr, int threadnr, int is_vp7)
{
    VP8Context *s = avctx->priv_data;
    VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
    int mb_y = td->thread_mb_pos >> 16; /* row index packed in top 16 bits */
    int mb_x, mb_xy = mb_y * s->mb_width;
    int num_jobs = s->num_jobs;
    VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
    /* Each row reads from one coefficient partition, round-robin. */
    VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
    VP8Macroblock *mb;
    /* Start-of-row destination pointers for the Y, U and V planes. */
    uint8_t *dst[3] = {
        curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
        curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
        curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
    };
    if (mb_y == 0)
        prev_td = td;
    else
        prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
    if (mb_y == s->mb_height - 1)
        next_td = td;
    else
        next_td = &s->thread_data[(jobnr + 1) % num_jobs];
    if (s->mb_layout == 1)
        mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
    else {
        // Make sure the previous frame has read its segmentation map,
        // if we re-use the same map.
        if (prev_frame && s->segmentation.enabled &&
            /* NOTE(review): the rest of this condition is missing from this
             * extract (presumably also tests !s->segmentation.update_map) —
             * confirm against the original source. */
            ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
        mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
        memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
        AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
    }

    /* VP8 clears the left non-zero-coefficient context every row; VP7 only
     * at the top of the frame. */
    if (!is_vp7 || mb_y == 0)
        memset(td->left_nnz, 0, sizeof(td->left_nnz));

    s->mv_min.x = -MARGIN;
    s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
        // Wait for previous thread to read mb_x+2, and reach mb_y-1.
        if (prev_td != td) {
            if (threadnr != 0) {
                check_thread_pos(td, prev_td,
                                 mb_x + (is_vp7 ? 2 : 1),
                                 mb_y - (is_vp7 ? 2 : 1));
            } else {
                /* Thread 0 waits on the filter pass of the row above, whose
                 * positions are offset by mb_width + 3 (see vp8_filter_mb_row). */
                check_thread_pos(td, prev_td,
                                 mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
                                 mb_y - (is_vp7 ? 2 : 1));
            }
        }

        s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
                         s->linesize, 4);
        s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
                         dst[2] - dst[1], 2);

        /* With the single-row layout the modes were not pre-decoded. */
        if (!s->mb_layout)
            decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
                           prev_frame && prev_frame->seg_map ?
                           prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);

        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);

        if (!mb->skip)
            decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);

        if (mb->mode <= MODE_I4x4)
            intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
        else
            inter_predict(s, td, dst, mb, mb_x, mb_y);

        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);

        if (!mb->skip) {
            idct_mb(s, td, dst, mb);
        } else {
            AV_ZERO64(td->left_nnz);
            AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned

            /* Reset DC block predictors if they would exist
             * if the mb had coefficients */
            if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
                td->left_nnz[8] = 0;
                s->top_nnz[mb_x][8] = 0;
            }
        }

        if (s->deblock_filter)
            filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);

        /* The last job saves the row border for the separate filter pass. */
        if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
            if (s->filter.simple)
                backup_mb_border(s->top_border[mb_x + 1], dst[0],
                                 NULL, NULL, s->linesize, 0, 1);
            else
                backup_mb_border(s->top_border[mb_x + 1], dst[0],
                                 dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        }

        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);

        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;
        s->mv_min.x -= 64;
        s->mv_max.x -= 64;

        /* NOTE(review): mb_x < s->mb_width inside this loop, so the first
         * branch appears unreachable — confirm against upstream. */
        if (mb_x == s->mb_width + 1) {
            update_pos(td, mb_y, s->mb_width + 3);
        } else {
            update_pos(td, mb_y, mb_x);
        }
    }
}
2355 
/**
 * Apply the in-loop deblocking filter to one row of macroblocks.
 * Runs after vp8_decode_mb_row_no_filter() for the same row (row index in
 * td->thread_mb_pos); synchronizes with neighbouring rows' jobs via
 * check_thread_pos()/update_pos().  Filter progress positions are offset
 * by (mb_width + 3) to distinguish them from decode progress.
 */
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
                              int jobnr, int threadnr, int is_vp7)
{
    VP8Context *s = avctx->priv_data;
    VP8ThreadData *td = &s->thread_data[threadnr];
    int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
    AVFrame *curframe = s->curframe->tf.f;
    VP8Macroblock *mb;
    VP8ThreadData *prev_td, *next_td;
    /* Start-of-row destination pointers for the Y, U and V planes. */
    uint8_t *dst[3] = {
        curframe->data[0] + 16 * mb_y * s->linesize,
        curframe->data[1] + 8 * mb_y * s->uvlinesize,
        curframe->data[2] + 8 * mb_y * s->uvlinesize
    };

    /* Layout 1 keeps all rows in macroblocks_base; otherwise only two
     * rows are kept and reused. */
    if (s->mb_layout == 1)
        mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
    else
        mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;

    /* Identify the jobs working on the rows above and below this one. */
    if (mb_y == 0)
        prev_td = td;
    else
        prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
    if (mb_y == s->mb_height - 1)
        next_td = td;
    else
        next_td = &s->thread_data[(jobnr + 1) % num_jobs];

    for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
        VP8FilterStrength *f = &td->filter_strength[mb_x];
        /* Wait until the row above has been filtered past mb_x + 1. */
        if (prev_td != td)
            check_thread_pos(td, prev_td,
                             (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
        /* And until the row below has been decoded past mb_x + 1. */
        if (next_td != td)
            if (next_td != &s->thread_data[0])
                check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);

        /* With a single job the decode pass did not back up the border. */
        if (num_jobs == 1) {
            if (s->filter.simple)
                backup_mb_border(s->top_border[mb_x + 1], dst[0],
                                 NULL, NULL, s->linesize, 0, 1);
            else
                backup_mb_border(s->top_border[mb_x + 1], dst[0],
                                 dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        }

        if (s->filter.simple)
            filter_mb_simple(s, dst[0], f, mb_x, mb_y);
        else
            filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;

        /* Publish filter progress with the mb_width + 3 offset. */
        update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
    }
}
2414 
2415 static av_always_inline
2416 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2417  int threadnr, int is_vp7)
2418 {
2419  VP8Context *s = avctx->priv_data;
2420  VP8ThreadData *td = &s->thread_data[jobnr];
2421  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2422  VP8Frame *curframe = s->curframe;
2423  int mb_y, num_jobs = s->num_jobs;
2424 
2425  td->thread_nr = threadnr;
2426  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2427  if (mb_y >= s->mb_height)
2428  break;
2429  td->thread_mb_pos = mb_y << 16;
2430  vp8_decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, is_vp7);
2431  if (s->deblock_filter)
2432  vp8_filter_mb_row(avctx, tdata, jobnr, threadnr, is_vp7);
2433  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2434 
2435  s->mv_min.y -= 64;
2436  s->mv_max.y -= 64;
2437 
2438  if (avctx->active_thread_type == FF_THREAD_FRAME)
2439  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2440  }
2441 
2442  return 0;
2443 }
2444 
/* execute2() job callback for VP7: decode this job's share of MB rows. */
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
                                    int jobnr, int threadnr)
{
    return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
}
2450 
/* execute2() job callback for VP8: decode this job's share of MB rows. */
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
                                    int jobnr, int threadnr)
{
    return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
}
2456 
2457 
/**
 * Decode one VP7 or VP8 frame from avpkt, manage the reference-frame
 * slots, and (unless the frame is invisible or skipped) return the
 * decoded picture in *data / *got_frame.
 *
 * NOTE(review): several continuation/body lines are missing from this
 * extract; each gap is flagged inline — confirm against the original
 * source before editing.
 */
static av_always_inline
int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt, int is_vp7)
{
    VP8Context *s = avctx->priv_data;
    int ret, i, referenced, num_jobs;
    enum AVDiscard skip_thresh;
    VP8Frame *av_uninit(curframe), *prev_frame;
    /* NOTE(review): one or two statements are missing from this extract
     * here — confirm against the original source. */

    if (is_vp7)
        ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
    else
        ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);

    if (ret < 0)
        goto err;

    prev_frame = s->framep[VP56_FRAME_CURRENT];

    /* A frame is "referenced" if it will update any reference slot.
     * NOTE(review): the final operand of this expression is missing from
     * this extract (presumably a test of s->update_altref) — confirm. */
    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||

    skip_thresh = !referenced ? AVDISCARD_NONREF
                : !s->keyframe ? AVDISCARD_NONKEY
                : AVDISCARD_ALL;

    if (avctx->skip_frame >= skip_thresh) {
        s->invisible = 1;
        memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
        goto skip_decode;
    }
    s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;

    // release no longer referenced frames
    for (i = 0; i < 5; i++)
        if (s->frames[i].tf.f->data[0] &&
            &s->frames[i] != prev_frame &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
            vp8_release_frame(s, &s->frames[i]);

    curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);

    /* Given that arithmetic probabilities are updated every frame, it's quite
     * likely that the values we have on a random interframe are complete
     * junk if we didn't start decode on a keyframe. So just don't display
     * anything rather than junk. */
    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
                         !s->framep[VP56_FRAME_GOLDEN] ||
                         !s->framep[VP56_FRAME_GOLDEN2])) {
        av_log(avctx, AV_LOG_WARNING,
               "Discarding interframe without a prior keyframe!\n");
        ret = AVERROR_INVALIDDATA;
        goto err;
    }

    curframe->tf.f->key_frame = s->keyframe;
    /* NOTE(review): the ':' branch of this conditional is missing from this
     * extract (presumably AV_PICTURE_TYPE_P) — confirm. */
    curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
    if ((ret = vp8_alloc_frame(s, curframe, referenced))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
        goto err;
    }

    // check if golden and altref are swapped
    /* NOTE(review): the bodies of the next four branches are missing from
     * this extract (they assign the s->next_framep[...] reference slots) —
     * confirm against the original source. */
    if (s->update_altref != VP56_FRAME_NONE)
    else

    if (s->update_golden != VP56_FRAME_NONE)
    else

    if (s->update_last)
        s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
    else
    /* NOTE(review): else-body missing from this extract. */

    s->next_framep[VP56_FRAME_CURRENT] = curframe;

    /* All state other threads need is now in place. */
    ff_thread_finish_setup(avctx);

    s->linesize = curframe->tf.f->linesize[0];
    s->uvlinesize = curframe->tf.f->linesize[1];

    memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
    /* Zero macroblock structures for top/top-left prediction
     * from outside the frame. */
    if (!s->mb_layout)
        memset(s->macroblocks + s->mb_height * 2 - 1, 0,
               (s->mb_width + 1) * sizeof(*s->macroblocks));
    if (!s->mb_layout && s->keyframe)
        memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);

    memset(s->ref_count, 0, sizeof(s->ref_count));

    if (s->mb_layout == 1) {
        // Make sure the previous frame has read its segmentation map,
        // if we re-use the same map.
        if (prev_frame && s->segmentation.enabled &&
            /* NOTE(review): condition continuation missing from this extract
             * (presumably also tests !s->segmentation.update_map) — confirm. */
            ff_thread_await_progress(&prev_frame->tf, 1, 0);
        if (is_vp7)
            vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
        else
            vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
    }

    if (avctx->active_thread_type == FF_THREAD_FRAME)
        num_jobs = 1;
    else
        num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
    s->num_jobs = num_jobs;
    s->curframe = curframe;
    s->prev_frame = prev_frame;
    s->mv_min.y = -MARGIN;
    s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
    for (i = 0; i < MAX_THREADS; i++) {
        s->thread_data[i].thread_mb_pos = 0;
        s->thread_data[i].wait_mb_pos = INT_MAX;
    }
    /* NOTE(review): the avctx->execute2(...) invocation lines are missing
     * from this extract in both branches — confirm. */
    if (is_vp7)
        num_jobs);
    else
        num_jobs);

    ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
    memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);

skip_decode:
    // if future frames don't use the updated probabilities,
    // reset them to the values we saved
    if (!s->update_probabilities)
        s->prob[0] = s->prob[1];

    if (!s->invisible) {
        if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
            return ret;
        *got_frame = 1;
    }

    return avpkt->size;
err:
    memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
    return ret;
}
2611 
/* Public VP8 decode entry point: forwards to the shared VP7/VP8 frame
 * decoder with is_vp7 = IS_VP8. */
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
}
2617 
#if CONFIG_VP7_DECODER
/* VP7 decode entry point: shared VP7/VP8 frame decoder with IS_VP7. */
static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                            AVPacket *avpkt)
{
    return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
2625 
/* NOTE(review): the signature line is missing from this extract; the
 * cross-references identify it as
 *   av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
 * — confirm against the original source.
 * Tears down the decoder: flushes all state and frees every AVFrame in
 * the frame pool. */
{
    VP8Context *s = avctx->priv_data;
    int i;

    vp8_decode_flush_impl(avctx, 1);
    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
        av_frame_free(&s->frames[i].tf.f);

    return 0;
}
2637 
/* NOTE(review): the signature line is missing from this extract; the
 * cross-references identify it as
 *   static av_cold int vp8_init_frames(VP8Context *s)
 * — confirm against the original source.
 * Allocates one AVFrame per slot of the frame pool.  Returns 0 on success
 * or AVERROR(ENOMEM) on allocation failure (callers free partial
 * allocations via ff_vp8_decode_free()). */
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
        s->frames[i].tf.f = av_frame_alloc();
        if (!s->frames[i].tf.f)
            return AVERROR(ENOMEM);
    }
    return 0;
}
2648 
/**
 * Shared VP7/VP8 decoder initialization: set pixel format, init DSP
 * contexts, copy the static scan table and allocate the frame pool.
 * Returns 0 on success or a negative AVERROR (frees everything via
 * ff_vp8_decode_free() on failure).
 */
static av_always_inline
int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
{
    VP8Context *s = avctx->priv_data;
    int ret;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->internal->allocate_progress = 1;

    ff_videodsp_init(&s->vdsp, 8);

    ff_vp78dsp_init(&s->vp8dsp);
    if (CONFIG_VP7_DECODER && is_vp7) {
        /* NOTE(review): a statement is missing from this extract here —
         * confirm against the original source. */
        ff_vp7dsp_init(&s->vp8dsp);
    } else if (CONFIG_VP8_DECODER && !is_vp7) {
        /* NOTE(review): a statement is missing from this extract here —
         * confirm against the original source. */
        ff_vp8dsp_init(&s->vp8dsp);
    }

    /* does not change for VP8 */
    memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));

    if ((ret = vp8_init_frames(s)) < 0) {
        ff_vp8_decode_free(avctx);
        return ret;
    }

    return 0;
}
2680 
#if CONFIG_VP7_DECODER
/* VP7 init entry point: shared VP7/VP8 initialization with IS_VP7. */
static int vp7_decode_init(AVCodecContext *avctx)
{
    return vp78_decode_init(avctx, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
2687 
/* NOTE(review): the signature line is missing from this extract
 * (av_cold int ff_vp8_decode_init(AVCodecContext *avctx)) — confirm.
 * Public VP8 init entry point: shared VP7/VP8 init with IS_VP8. */
{
    return vp78_decode_init(avctx, IS_VP8);
}
2692 
2693 #if CONFIG_VP8_DECODER
2694 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2695 {
2696  VP8Context *s = avctx->priv_data;
2697  int ret;
2698 
2699  s->avctx = avctx;
2700 
2701  if ((ret = vp8_init_frames(s)) < 0) {
2702  ff_vp8_decode_free(avctx);
2703  return ret;
2704  }
2705 
2706  return 0;
2707 }
2708 
/* Translate a frame pointer from the source thread context (s_src) into
 * the corresponding slot of the destination context (s); NULL stays NULL.
 * Relies on locals named s and s_src at the expansion site.  The argument
 * and the whole expansion are now parenthesized so the macro stays correct
 * inside larger expressions. */
#define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2710 
/**
 * Frame-threading context update: copy the decoding state the next frame
 * needs (probabilities, segmentation, loop-filter deltas, sign biases and
 * reference-frame pointers) from the source thread's context into this
 * thread's context.  Returns 0 on success or a negative AVERROR.
 */
static int vp8_decode_update_thread_context(AVCodecContext *dst,
                                            const AVCodecContext *src)
{
    VP8Context *s = dst->priv_data, *s_src = src->priv_data;
    int i;

    /* Dimensions changed mid-stream: drop our buffers so they are
     * reallocated at the new size. */
    if (s->macroblocks_base &&
        (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
        free_buffers(s);
        s->mb_width = s_src->mb_width;
        s->mb_height = s_src->mb_height;
    }

    /* Take the probability set that will be in effect for the next frame
     * (index depends on whether the source kept its updates). */
    s->prob[0] = s_src->prob[!s_src->update_probabilities];
    s->segmentation = s_src->segmentation;
    s->lf_delta = s_src->lf_delta;
    memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));

    /* Re-reference every allocated frame of the source context. */
    for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
        if (s_src->frames[i].tf.f->data[0]) {
            int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
            if (ret < 0)
                return ret;
        }
    }

    /* Remap the source's frame pointers onto this context's frame array. */
    s->framep[0] = REBASE(s_src->next_framep[0]);
    s->framep[1] = REBASE(s_src->next_framep[1]);
    s->framep[2] = REBASE(s_src->next_framep[2]);
    s->framep[3] = REBASE(s_src->next_framep[3]);

    return 0;
}
2744 #endif /* CONFIG_VP8_DECODER */
2745 
#if CONFIG_VP7_DECODER
/* Decoder registration for On2 VP7.
 * NOTE(review): two designated initializers are missing from this extract
 * (likely .close and .flush) — confirm against the original source. */
AVCodec ff_vp7_decoder = {
    .name = "vp7",
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_VP7,
    .priv_data_size = sizeof(VP8Context),
    .init = vp7_decode_init,
    .decode = vp7_decode_frame,
    .capabilities = CODEC_CAP_DR1,
};
#endif /* CONFIG_VP7_DECODER */
2760 
#if CONFIG_VP8_DECODER
/* Decoder registration for On2 VP8.
 * NOTE(review): several designated initializers are missing from this
 * extract (likely .init, .close, .decode, .capabilities, .flush) —
 * confirm against the original source. */
AVCodec ff_vp8_decoder = {
    .name = "vp8",
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_VP8,
    .priv_data_size = sizeof(VP8Context),
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
};
#endif /* CONFIG_VP8_DECODER */
static void get_quants(VP8Context *s)
Definition: vp8.c:290
uint8_t golden
Definition: vp8.h:235
uint8_t inner_limit
Definition: vp8.h:80
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:178
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:719
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1547
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:786
discard all frames except keyframes
Definition: avcodec.h:567
Definition: vp9.h:55
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:65
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:752
#define DC_128_PRED8x8
Definition: h264pred.h:76
(only used in prediction) no split MVs
Definition: vp8.h:75
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:106
int size
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:221
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
struct VP8Context::@66 segmentation
Base parameters for segmentation, i.e.
VP56mv mv_min
Definition: vp8.h:153
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:156
static av_unused void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
Definition: w32pthreads.h:149
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1244
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1273
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:386
uint8_t feature_value[4][4]
Definition: vp8.h:293
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:133
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:180
uint8_t mbskip_enabled
Definition: vp8.h:151
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:336
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1295
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:238
uint8_t scan[16]
Definition: vp8.h:240
int linesize
Definition: vp8.h:146
int size
Definition: avcodec.h:974
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:112
#define MARGIN
Definition: vp8.c:2149
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:58
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1270
VP56mv bmv[16]
Definition: vp8.h:96
#define AV_RL16
Definition: intreadwrite.h:42
uint8_t inner_filter
Definition: vp8.h:81
#define FF_ARRAY_ELEMS(a)
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
uint8_t segmentid[3]
Definition: vp8.h:231
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:811
discard all
Definition: avcodec.h:568
struct VP8Context::@70 prob[2]
These are all of the updatable probabilities for binary decisions.
static void copy_luma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:411
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:2812
uint8_t sharpness
Definition: vp8.h:175
#define AV_WN32A(p, v)
Definition: intreadwrite.h:458
2 16x8 blocks (vertical)
Definition: vp8.h:71
#define AV_COPY32(d, s)
Definition: intreadwrite.h:506
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:253
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2356
VP8Frame * framep[4]
Definition: vp8.h:139
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1307
static const uint8_t zigzag_scan[16]
Definition: h264data.h:54
#define VP7_MVC_SIZE
Definition: vp8.c:377
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: width>>3, height is assumed equal to width second dimension: 0 if no vertical interp...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:779
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1331
uint8_t(* top_nnz)[9]
Definition: vp8.h:220
int num_jobs
Definition: vp8.h:267
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Definition: avcodec.h:2743
#define MAX_THREADS
Definition: mpegvideo.h:66
#define AV_RN32A(p)
Definition: intreadwrite.h:446
uint8_t pred16x16[4]
Definition: vp8.h:236
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:167
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:145
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int16_t y
Definition: vp56.h:67
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:750
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:275
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:246
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:94
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1433
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:104
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:190
#define av_cold
Definition: attributes.h:66
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:57
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:278
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:897
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:117
uint8_t ref_frame
Definition: vp8.h:89
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1511
Multithreading support functions.
#define b
Definition: input.c:52
Definition: vp9.h:54
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2612
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:188
uint8_t mvc[2][19]
Definition: vp8.h:239
VP56mv mv
Definition: vp8.h:95
int8_t base_quant[4]
Definition: vp8.h:168
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:741
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:684
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:245
const char data[16]
Definition: mxf.c:70
uint8_t * data
Definition: avcodec.h:973
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:2349
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:79
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:200
VP8Frame * prev_frame
Definition: vp8.h:142
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:259
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:264
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:510
uint8_t feature_index_prob[4][3]
Definition: vp8.h:292
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:93
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2459
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:181
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
#define r
Definition: input.c:51
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:672
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:891
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
H264PredContext hpc
Definition: vp8.h:263
Definition: vp8.h:130
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1828
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:166
uint16_t mb_width
Definition: vp8.h:144
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:763
#define FF_SIGNBIT(x)
Definition: internal.h:38
uint8_t last
Definition: vp8.h:234
static const int sizes[][2]
Definition: img2dec.c:46
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:620
uint8_t mode
Definition: vp8.h:88
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:69
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1476
static int pthread_mutex_init(pthread_mutex_t *m, void *attr)
Definition: w32pthreads.h:117
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2451
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:145
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2575
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:138
#define EDGE_EMU_LINESIZE
Definition: vp8.h:125
simple assert() macros that are a bit more flexible than ISO C assert().
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:169
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:285
VideoDSPContext vdsp
Definition: vp8.h:261
const char * name
Name of the codec implementation.
Definition: avcodec.h:2819
VP8Macroblock * macroblocks_base
Definition: vp8.h:243
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1790
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:126
int16_t block[6][4][16]
Definition: vp8.h:100
struct VP8Context::@69 lf_delta
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1213
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:271
#define FFMAX(a, b)
Definition: common.h:55
uint8_t keyframe
Definition: vp8.h:149
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:185
VP56Frame
Definition: vp56.h:39
int16_t luma_qmul[2]
Definition: vp8.h:190
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:58
useful rectangle filling function
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:37
static int pthread_mutex_destroy(pthread_mutex_t *m)
Definition: w32pthreads.h:122
static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
Definition: w32pthreads.h:173
4x4 blocks of 4x4px each
Definition: vp8.h:74
uint8_t deblock_filter
Definition: vp8.h:150
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2567
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:57
uint8_t feature_present_prob[4]
Definition: vp8.h:291
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1742
int16_t block_dc[16]
Definition: vp8.h:101
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:325
int width
picture width / height.
Definition: avcodec.h:1229
uint8_t mbskip
Definition: vp8.h:232
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:216
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2638
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:41
int32_t
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2231
static av_unused int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:313
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2568
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:191
int16_t chroma_qmul[2]
Definition: vp8.h:192
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
#define AV_RL32
Definition: intreadwrite.h:146
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:219
ThreadFrame tf
Definition: vp8.h:131
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:1997
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:779
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:774
#define vp56_rac_get_prob
Definition: vp56.h:243
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:99
#define CONFIG_VP8_DECODER
Definition: config.h:623
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1346
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:402
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2189
uint8_t segment
Definition: vp8.h:92
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2556
if(ac->has_optimized_func)
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2445
#define IS_VP8
Definition: vp8dsp.h:103
static const int8_t mv[256][2]
Definition: 4xm.c:75
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2183
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1485
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:260
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
NULL
Definition: eval.c:55
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
static int width
Definition: utils.c:156
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1851
VP8Frame * curframe
Definition: vp8.h:141
uint8_t simple
Definition: vp8.h:173
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:265
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
uint8_t level
Definition: vp8.h:174
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:74
AVBufferRef * seg_map
Definition: vp8.h:132
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:798
main external API structure.
Definition: avcodec.h:1050
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:490
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:436
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:140
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:273
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:116
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:222
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:260
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:245
int coded_height
Definition: avcodec.h:1244
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:195
int index
Definition: gxfenc.c:72
struct VP8Context::@67 filter
VP8FilterStrength * filter_strength
Definition: vp8.h:127
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:741
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1467
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:361
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:165
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1935
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:380
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1660
int uvlinesize
Definition: vp8.h:147
static void update_refs(VP8Context *s)
Definition: vp8.c:400
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:393
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
VP56mv mv_max
Definition: vp8.h:154
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:81
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:730
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
static const int8_t vp8_segmentid_tree[][2]
Definition: vp8data.h:319
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1088
#define DC_127_PRED8x8
Definition: h264pred.h:85
void ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:65
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2688
#define AV_RL24
Definition: intreadwrite.h:78
int update_altref
Definition: vp8.h:247
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:290
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:207
2 8x16 blocks (horizontal)
Definition: vp8.h:72
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2626
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: vp8.c:1421
Definition: vp9.h:56
#define AV_ZERO128(d)
Definition: intreadwrite.h:542
uint8_t pred8x8c[3]
Definition: vp8.h:237
int height
Definition: gxfenc.c:72
discard all non reference
Definition: avcodec.h:565
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2151
uint8_t partitioning
Definition: vp8.h:90
#define AV_ZERO64(d)
Definition: intreadwrite.h:538
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1123
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:66
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:759
common internal api header.
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:755
#define AV_COPY128(d, s)
Definition: intreadwrite.h:514
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1904
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2235
int wait_mb_pos
Definition: vp8.h:123
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1797
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:61
uint8_t chroma_pred_mode
Definition: vp8.h:91
struct VP8Context::@68 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2729
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:297
int invisible
Definition: vp8.h:244
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:796
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:499
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:157
void * priv_data
Definition: avcodec.h:1092
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1501
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:62
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:878
#define XCHG(a, b, xchg)
#define CONFIG_VP7_DECODER
Definition: config.h:622
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:2616
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2232
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1100
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:262
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:142
int thread_nr
Definition: vp8.h:117
#define AV_ZERO32(d)
Definition: intreadwrite.h:534
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2416
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
uint64_t layout
AVDiscard
Definition: avcodec.h:560
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:347
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
#define av_uninit(x)
Definition: attributes.h:109
static void fade(uint8_t *dst, uint8_t *src, int width, int height, int linesize, int alpha, int beta)
Definition: vp8.c:422
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1684
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2030
#define IS_VP7
Definition: vp8dsp.h:102
#define av_always_inline
Definition: attributes.h:40
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:169
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:117
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:60
uint8_t intra
Definition: vp8.h:233
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:988
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:109
uint8_t skip
Definition: vp8.h:85
void ff_vp8dsp_init(VP8DSPContext *c)
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:352
This structure stores compressed data.
Definition: avcodec.h:950
#define VP8_MVC_SIZE
Definition: vp8.c:378
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:471
uint8_t profile
Definition: vp8.h:152
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:850
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
VP8ThreadData * thread_data
Definition: vp8.h:137
Predicted.
Definition: avutil.h:254
int thread_mb_pos
Definition: vp8.h:122
2x2 blocks of 8x8px each
Definition: vp8.h:73
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2117
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:824
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2650
#define AV_WN64(p, v)
Definition: intreadwrite.h:342
uint8_t filter_level
Definition: vp8.h:79
static int16_t block[64]
Definition: dct-test.c:88