vp8.c
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of Libav.
11  *
12  * Libav is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * Libav is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with Libav; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "rectangle.h"
32 #include "thread.h"
33 #include "vp8.h"
34 #include "vp8data.h"
35 
36 #if ARCH_ARM
37 # include "arm/vp8.h"
38 #endif
39 
40 static void free_buffers(VP8Context *s)
41 {
42  int i;
43  if (s->thread_data)
44  for (i = 0; i < MAX_THREADS; i++) {
45 #if HAVE_THREADS
46  pthread_cond_destroy(&s->thread_data[i].cond);
47  pthread_mutex_destroy(&s->thread_data[i].lock);
48 #endif
49  av_freep(&s->thread_data[i].filter_strength);
50  }
51  av_freep(&s->thread_data);
52  av_freep(&s->macroblocks_base);
53  av_freep(&s->intra4x4_pred_mode_top);
54  av_freep(&s->top_nnz);
55  av_freep(&s->top_border);
56 
57  s->macroblocks = NULL;
58 }
59 
60 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
61 {
62  int ret;
63  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
64  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
65  return ret;
66  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
67  ff_thread_release_buffer(s->avctx, &f->tf);
68  return AVERROR(ENOMEM);
69  }
70  return 0;
71 }
72 
73 static void vp8_release_frame(VP8Context *s, VP8Frame *f)
74 {
75  av_buffer_unref(&f->seg_map);
76  ff_thread_release_buffer(s->avctx, &f->tf);
77 }
78 
79 #if CONFIG_VP8_DECODER
80 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
81 {
82  int ret;
83 
84  vp8_release_frame(s, dst);
85 
86  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
87  return ret;
88  if (src->seg_map &&
89  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
90  vp8_release_frame(s, dst);
91  return AVERROR(ENOMEM);
92  }
93 
94  return 0;
95 }
96 #endif /* CONFIG_VP8_DECODER */
97 
98 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
99 {
100  VP8Context *s = avctx->priv_data;
101  int i;
102 
103  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
104  vp8_release_frame(s, &s->frames[i]);
105  memset(s->framep, 0, sizeof(s->framep));
106 
107  if (free_mem)
108  free_buffers(s);
109 }
110 
111 static void vp8_decode_flush(AVCodecContext *avctx)
112 {
113  vp8_decode_flush_impl(avctx, 0);
114 }
115 
116 static VP8Frame *vp8_find_free_buffer(VP8Context *s)
117 {
118  VP8Frame *frame = NULL;
119  int i;
120 
121  // find a free buffer
122  for (i = 0; i < 5; i++)
123  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
124  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
125  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
126  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
127  frame = &s->frames[i];
128  break;
129  }
130  if (i == 5) {
131  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
132  abort();
133  }
134  if (frame->tf.f->data[0])
135  vp8_release_frame(s, frame);
136 
137  return frame;
138 }
139 
140 static av_always_inline
141 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
142 {
143  AVCodecContext *avctx = s->avctx;
144  int i, ret;
145 
146  if (width != s->avctx->width ||
147  height != s->avctx->height) {
148  vp8_decode_flush_impl(s->avctx, 1);
149 
150  ret = ff_set_dimensions(s->avctx, width, height);
151  if (ret < 0)
152  return ret;
153  }
154 
155  s->mb_width = (s->avctx->coded_width + 15) / 16;
156  s->mb_height = (s->avctx->coded_height + 15) / 16;
157 
158  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
159  FFMIN(s->num_coeff_partitions, avctx->thread_count) > 1;
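 /* note: "&&" binds tighter than "||", so mb_layout is set for VP7 unconditionally,
  * and otherwise only for slice threading with more than one coefficient partition */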
160  if (!s->mb_layout) { // Frame threading and one thread
161  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
162  sizeof(*s->macroblocks));
163  s->intra4x4_pred_mode_top = av_mallocz(s->mb_width * 4);
164  } else // Sliced threading
165  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
166  sizeof(*s->macroblocks));
167  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
168  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
169  s->thread_data = av_mallocz(MAX_THREADS * sizeof(VP8ThreadData));
170 
171  for (i = 0; i < MAX_THREADS; i++) {
172  s->thread_data[i].filter_strength =
173  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
174 #if HAVE_THREADS
175  pthread_mutex_init(&s->thread_data[i].lock, NULL);
176  pthread_cond_init(&s->thread_data[i].cond, NULL);
177 #endif
178  }
179 
180  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
181  (!s->intra4x4_pred_mode_top && !s->mb_layout))
182  return AVERROR(ENOMEM);
183 
184  s->macroblocks = s->macroblocks_base + 1;
185 
186  return 0;
187 }
188 
189 static int vp7_update_dimensions(VP8Context *s, int width, int height)
190 {
191  return update_dimensions(s, width, height, IS_VP7);
192 }
193 
194 static int vp8_update_dimensions(VP8Context *s, int width, int height)
195 {
196  return update_dimensions(s, width, height, IS_VP8);
197 }
198 
199 static void parse_segment_info(VP8Context *s)
200 {
201  VP56RangeCoder *c = &s->c;
202  int i;
203 
204  s->segmentation.update_map = vp8_rac_get(c);
205 
206  if (vp8_rac_get(c)) { // update segment feature data
207  s->segmentation.absolute_vals = vp8_rac_get(c);
208 
209  for (i = 0; i < 4; i++)
210  s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
211 
212  for (i = 0; i < 4; i++)
213  s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
214  }
215  if (s->segmentation.update_map)
216  for (i = 0; i < 3; i++)
217  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
218 }
219 
220 static void update_lf_deltas(VP8Context *s)
221 {
222  VP56RangeCoder *c = &s->c;
223  int i;
224 
225  for (i = 0; i < 4; i++) {
226  if (vp8_rac_get(c)) {
227  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
228 
229  if (vp8_rac_get(c))
230  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
231  }
232  }
233 
234  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
235  if (vp8_rac_get(c)) {
236  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
237 
238  if (vp8_rac_get(c))
239  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
240  }
241  }
242 }
243 
244 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
245 {
246  const uint8_t *sizes = buf;
247  int i;
248 
249  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
250 
251  buf += 3 * (s->num_coeff_partitions - 1);
252  buf_size -= 3 * (s->num_coeff_partitions - 1);
253  if (buf_size < 0)
254  return -1;
255 
256  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
257  int size = AV_RL24(sizes + 3 * i);
258  if (buf_size - size < 0)
259  return -1;
260 
261  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
262  buf += size;
263  buf_size -= size;
264  }
265  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
266 
267  return 0;
268 }
269 
270 static void vp7_get_quants(VP8Context *s)
271 {
272  VP56RangeCoder *c = &s->c;
273 
274  int yac_qi = vp8_rac_get_uint(c, 7);
275  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
276  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
277  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
278  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
279  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
280 
281  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
282  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
283  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
284  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
285  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
286  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
287 }
288 
289 static void get_quants(VP8Context *s)
290 {
291  VP56RangeCoder *c = &s->c;
292  int i, base_qi;
293 
294  int yac_qi = vp8_rac_get_uint(c, 7);
295  int ydc_delta = vp8_rac_get_sint(c, 4);
296  int y2dc_delta = vp8_rac_get_sint(c, 4);
297  int y2ac_delta = vp8_rac_get_sint(c, 4);
298  int uvdc_delta = vp8_rac_get_sint(c, 4);
299  int uvac_delta = vp8_rac_get_sint(c, 4);
300 
301  for (i = 0; i < 4; i++) {
302  if (s->segmentation.enabled) {
303  base_qi = s->segmentation.base_quant[i];
304  if (!s->segmentation.absolute_vals)
305  base_qi += yac_qi;
306  } else
307  base_qi = yac_qi;
308 
309  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
310  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
311  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
312  /* 101581>>16 is equivalent to 155/100 */
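 /* (sanity check: 155/100 = 1.55, and 101581 / 65536 = 1.55000..., so the
  * fixed-point multiply matches that scale factor to within rounding) */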
313  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
314  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
315  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
316 
317  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
318  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
319  }
320 }
321 
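/* Roughly: pick which frame a reference buffer (golden or altref) should be updated
 * from after decoding. An explicit update flag selects the current frame; otherwise a
 * 2-bit code selects the previous frame, the other golden/altref buffer, or no update. */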
335 static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
336 {
337  VP56RangeCoder *c = &s->c;
338 
339  if (update)
340  return VP56_FRAME_CURRENT;
341 
342  switch (vp8_rac_get_uint(c, 2)) {
343  case 1:
344  return VP56_FRAME_PREVIOUS;
345  case 2:
346  return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
347  }
348  return VP56_FRAME_NONE;
349 }
350 
351 static void vp78_reset_probability_tables(VP8Context *s)
352 {
353  int i, j;
354  for (i = 0; i < 4; i++)
355  for (j = 0; j < 16; j++)
356  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
357  sizeof(s->prob->token[i][j]));
358 }
359 
360 static void vp78_update_probability_tables(VP8Context *s)
361 {
362  VP56RangeCoder *c = &s->c;
363  int i, j, k, l, m;
364 
365  for (i = 0; i < 4; i++)
366  for (j = 0; j < 8; j++)
367  for (k = 0; k < 3; k++)
368  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
369  if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
370  int prob = vp8_rac_get_uint(c, 8);
371  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
372  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
373  }
374 }
375 
376 #define VP7_MVC_SIZE 17
377 #define VP8_MVC_SIZE 19
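/* number of per-component motion-vector probabilities updated below:
 * VP7 uses a 17-entry model, VP8 a 19-entry one */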
378 
379 static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s,
380  int mvc_size)
381 {
382  VP56RangeCoder *c = &s->c;
383  int i, j;
384 
385  if (vp8_rac_get(c))
386  for (i = 0; i < 4; i++)
387  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
388  if (vp8_rac_get(c))
389  for (i = 0; i < 3; i++)
390  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
391 
392  // 17.2 MV probability update
393  for (i = 0; i < 2; i++)
394  for (j = 0; j < mvc_size; j++)
395  if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
396  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
397 }
398 
399 static void update_refs(VP8Context *s)
400 {
401  VP56RangeCoder *c = &s->c;
402 
403  int update_golden = vp8_rac_get(c);
404  int update_altref = vp8_rac_get(c);
405 
406  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
407  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
408 }
409 
410 static void copy_luma(AVFrame *dst, AVFrame *src, int width, int height)
411 {
412  int i, j;
413 
414  for (j = 1; j < 3; j++) {
415  for (i = 0; i < height / 2; i++)
416  memcpy(dst->data[j] + i * dst->linesize[j],
417  src->data[j] + i * src->linesize[j], width / 2);
418  }
419 }
420 
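/* per-pixel fade used by VP7: dst = av_clip_uint8(y + ((y * beta) >> 8) + alpha) */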
421 static void fade(uint8_t *dst, uint8_t *src,
422  int width, int height, int linesize,
423  int alpha, int beta)
424 {
425  int i, j;
426 
427  for (j = 0; j < height; j++) {
428  for (i = 0; i < width; i++) {
429  uint8_t y = src[j * linesize + i];
430  dst[j * linesize + i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
431  }
432  }
433 }
434 
435 static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
436 {
437  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
438  int beta = (int8_t) vp8_rac_get_uint(c, 8);
439  int ret;
440 
441  if (!s->keyframe && (alpha || beta)) {
442  int width = s->mb_width * 16;
443  int height = s->mb_height * 16;
444  AVFrame *src, *dst;
445 
446  if (!s->framep[VP56_FRAME_PREVIOUS])
447  return AVERROR_INVALIDDATA;
448 
449  dst =
450  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
451 
452  /* preserve the golden frame, write a new previous frame */
453  if (s->framep[VP56_FRAME_GOLDEN] == s->framep[VP56_FRAME_PREVIOUS]) {
454  s->framep[VP56_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
455  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
456  return ret;
457 
458  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
459 
460  copy_luma(dst, src, width, height);
461  }
462 
463  fade(dst->data[0], src->data[0],
464  width, height, dst->linesize[0], alpha, beta);
465  }
466 
467  return 0;
468 }
469 
470 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
471 {
472  VP56RangeCoder *c = &s->c;
473  int part1_size, hscale, vscale, i, j, ret;
474  int width = s->avctx->width;
475  int height = s->avctx->height;
476 
477  if (buf_size < 4) {
478  return AVERROR_INVALIDDATA;
479  }
480 
481  s->profile = (buf[0] >> 1) & 7;
482  if (s->profile > 1) {
483  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
484  return AVERROR_INVALIDDATA;
485  }
486 
487  s->keyframe = !(buf[0] & 1);
488  s->invisible = 0;
489  part1_size = AV_RL24(buf) >> 4;
490 
491  buf += 4 - s->profile;
492  buf_size -= 4 - s->profile;
493 
494  if (buf_size < part1_size) {
495  return AVERROR_INVALIDDATA;
496  }
497 
498  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
499 
500  ff_vp56_init_range_decoder(c, buf, part1_size);
501  buf += part1_size;
502  buf_size -= part1_size;
503 
504  /* A. Dimension information (keyframes only) */
505  if (s->keyframe) {
506  width = vp8_rac_get_uint(c, 12);
507  height = vp8_rac_get_uint(c, 12);
508  hscale = vp8_rac_get_uint(c, 2);
509  vscale = vp8_rac_get_uint(c, 2);
510  if (hscale || vscale)
511  avpriv_request_sample(s->avctx, "Upscaling");
512 
513  s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
514  vp78_reset_probability_tables(s);
515  memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
516  sizeof(s->prob->pred16x16));
517  memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
518  sizeof(s->prob->pred8x8c));
519  for (i = 0; i < 2; i++)
520  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
521  sizeof(vp7_mv_default_prob[i]));
522  memset(&s->segmentation, 0, sizeof(s->segmentation));
523  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
524  memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
525  }
526 
527  if (s->keyframe || s->profile > 0)
528  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
529 
530  /* B. Decoding information for all four macroblock-level features */
531  for (i = 0; i < 4; i++) {
532  s->feature_enabled[i] = vp8_rac_get(c);
533  if (s->feature_enabled[i]) {
534  s->feature_present_prob[i] = vp8_rac_get_uint(c, 8);
535 
536  for (j = 0; j < 3; j++)
537  s->feature_index_prob[i][j] =
538  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
539 
540  if (vp7_feature_value_size[s->profile][i])
541  for (j = 0; j < 4; j++)
542  s->feature_value[i][j] =
543  vp8_rac_get(c) ? vp8_rac_get_uint(c, vp7_feature_value_size[s->profile][i]) : 0;
544  }
545  }
546 
547  s->segmentation.enabled = 0;
548  s->segmentation.update_map = 0;
549  s->lf_delta.enabled = 0;
550 
551  s->num_coeff_partitions = 1;
552  ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
553 
554  if (!s->macroblocks_base || /* first frame */
555  width != s->avctx->width || height != s->avctx->height ||
556  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
557  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
558  return ret;
559  }
560 
561  /* C. Dequantization indices */
562  vp7_get_quants(s);
563 
564  /* D. Golden frame update flag (a Flag) for interframes only */
565  if (!s->keyframe) {
566  s->update_golden = vp8_rac_get(c) ? VP56_FRAME_CURRENT : VP56_FRAME_NONE;
567  s->sign_bias[VP56_FRAME_GOLDEN] = 0;
568  }
569 
570  s->update_last = 1;
571  s->update_probabilities = 1;
572  s->fade_present = 1;
573 
574  if (s->profile > 0) {
575  s->update_probabilities = vp8_rac_get(c);
576  if (!s->update_probabilities)
577  s->prob[1] = s->prob[0];
578 
579  if (!s->keyframe)
580  s->fade_present = vp8_rac_get(c);
581  }
582 
583  /* E. Fading information for previous frame */
584  if (s->fade_present && vp8_rac_get(c)) {
585  if ((ret = vp7_fade_frame(s, c)) < 0)
586  return ret;
587  }
588 
589  /* F. Loop filter type */
590  if (!s->profile)
591  s->filter.simple = vp8_rac_get(c);
592 
593  /* G. DCT coefficient ordering specification */
594  if (vp8_rac_get(c))
595  for (i = 1; i < 16; i++)
596  s->prob[0].scan[i] = zigzag_scan[vp8_rac_get_uint(c, 4)];
597 
598  /* H. Loop filter levels */
599  if (s->profile > 0)
600  s->filter.simple = vp8_rac_get(c);
601  s->filter.level = vp8_rac_get_uint(c, 6);
602  s->filter.sharpness = vp8_rac_get_uint(c, 3);
603 
604  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
605  vp78_update_probability_tables(s);
606 
607  s->mbskip_enabled = 0;
608 
609  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
610  if (!s->keyframe) {
611  s->prob->intra = vp8_rac_get_uint(c, 8);
612  s->prob->last = vp8_rac_get_uint(c, 8);
613  vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP7_MVC_SIZE);
614  }
615 
616  return 0;
617 }
618 
619 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
620 {
621  VP56RangeCoder *c = &s->c;
622  int header_size, hscale, vscale, ret;
623  int width = s->avctx->width;
624  int height = s->avctx->height;
625 
626  s->keyframe = !(buf[0] & 1);
627  s->profile = (buf[0]>>1) & 7;
628  s->invisible = !(buf[0] & 0x10);
629  header_size = AV_RL24(buf) >> 5;
630  buf += 3;
631  buf_size -= 3;
632 
633  if (s->profile > 3)
634  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
635 
636  if (!s->profile)
637  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab,
638  sizeof(s->put_pixels_tab));
639  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
640  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab,
641  sizeof(s->put_pixels_tab));
642 
643  if (header_size > buf_size - 7 * s->keyframe) {
644  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
645  return AVERROR_INVALIDDATA;
646  }
647 
648  if (s->keyframe) {
649  if (AV_RL24(buf) != 0x2a019d) {
650  av_log(s->avctx, AV_LOG_ERROR,
651  "Invalid start code 0x%x\n", AV_RL24(buf));
652  return AVERROR_INVALIDDATA;
653  }
654  width = AV_RL16(buf + 3) & 0x3fff;
655  height = AV_RL16(buf + 5) & 0x3fff;
656  hscale = buf[4] >> 6;
657  vscale = buf[6] >> 6;
658  buf += 7;
659  buf_size -= 7;
660 
661  if (hscale || vscale)
662  avpriv_request_sample(s->avctx, "Upscaling");
663 
664  s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
665  vp78_reset_probability_tables(s);
666  memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
667  sizeof(s->prob->pred16x16));
668  memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
669  sizeof(s->prob->pred8x8c));
670  memcpy(s->prob->mvc, vp8_mv_default_prob,
671  sizeof(s->prob->mvc));
672  memset(&s->segmentation, 0, sizeof(s->segmentation));
673  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
674  }
675 
676  ff_vp56_init_range_decoder(c, buf, header_size);
677  buf += header_size;
678  buf_size -= header_size;
679 
680  if (s->keyframe) {
681  if (vp8_rac_get(c))
682  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
683  vp8_rac_get(c); // whether we can skip clamping in dsp functions
684  }
685 
686  if ((s->segmentation.enabled = vp8_rac_get(c)))
687  parse_segment_info(s);
688  else
689  s->segmentation.update_map = 0; // FIXME: move this to some init function?
690 
691  s->filter.simple = vp8_rac_get(c);
692  s->filter.level = vp8_rac_get_uint(c, 6);
693  s->filter.sharpness = vp8_rac_get_uint(c, 3);
694 
695  if ((s->lf_delta.enabled = vp8_rac_get(c)))
696  if (vp8_rac_get(c))
697  update_lf_deltas(s);
698 
699  if (setup_partitions(s, buf, buf_size)) {
700  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
701  return AVERROR_INVALIDDATA;
702  }
703 
704  if (!s->macroblocks_base || /* first frame */
705  width != s->avctx->width || height != s->avctx->height)
706  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
707  return ret;
708 
709  get_quants(s);
710 
711  if (!s->keyframe) {
712  update_refs(s);
713  s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
714  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
715  }
716 
717  // if we aren't saving this frame's probabilities for future frames,
718  // make a copy of the current probabilities
719  if (!(s->update_probabilities = vp8_rac_get(c)))
720  s->prob[1] = s->prob[0];
721 
722  s->update_last = s->keyframe || vp8_rac_get(c);
723 
723 
724  vp78_update_probability_tables(s);
725 
726  if ((s->mbskip_enabled = vp8_rac_get(c)))
727  s->prob->mbskip = vp8_rac_get_uint(c, 8);
728 
729  if (!s->keyframe) {
730  s->prob->intra = vp8_rac_get_uint(c, 8);
731  s->prob->last = vp8_rac_get_uint(c, 8);
732  s->prob->golden = vp8_rac_get_uint(c, 8);
733  vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP8_MVC_SIZE);
734  }
735 
736  return 0;
737 }
738 
739 static av_always_inline
740 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
741 {
742  dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
743  dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
744 }
745 
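/* Motion vector component decoding (spec section 17.1): either a short value read
 * from a small tree, or a long value assembled bit by bit. */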
749 static int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
750 {
751  int bit, x = 0;
752 
753  if (vp56_rac_get_prob_branchy(c, p[0])) {
754  int i;
755 
756  for (i = 0; i < 3; i++)
757  x += vp56_rac_get_prob(c, p[9 + i]) << i;
758  for (i = (vp7 ? 7 : 9); i > 3; i--)
759  x += vp56_rac_get_prob(c, p[9 + i]) << i;
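 /* bit 3 is implicit when no higher bit is set: magnitudes below 8 would have
  * used the small tree, so the 8 is added unconditionally in that case */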
760  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
761  x += 8;
762  } else {
763  // small_mvtree
764  const uint8_t *ps = p + 2;
765  bit = vp56_rac_get_prob(c, *ps);
766  ps += 1 + 3 * bit;
767  x += 4 * bit;
768  bit = vp56_rac_get_prob(c, *ps);
769  ps += 1 + bit;
770  x += 2 * bit;
771  x += vp56_rac_get_prob(c, *ps);
772  }
773 
774  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
775 }
776 
777 static av_always_inline
778 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
779 {
780  if (is_vp7)
781  return vp7_submv_prob;
782 
783  if (left == top)
784  return vp8_submv_prob[4 - !!left];
785  if (!top)
786  return vp8_submv_prob[2];
787  return vp8_submv_prob[1 - !!left];
788 }
789 
794 static av_always_inline
795 int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
796  int layout, int is_vp7)
797 {
798  int part_idx;
799  int n, num;
800  VP8Macroblock *top_mb;
801  VP8Macroblock *left_mb = &mb[-1];
802  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
803  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
804  VP56mv *top_mv;
805  VP56mv *left_mv = left_mb->bmv;
806  VP56mv *cur_mv = mb->bmv;
807 
808  if (!layout) // layout is inlined, s->mb_layout is not
809  top_mb = &mb[2];
810  else
811  top_mb = &mb[-s->mb_width - 1];
812  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
813  top_mv = top_mb->bmv;
814 
818  else
819  part_idx = VP8_SPLITMVMODE_8x8;
820  } else {
821  part_idx = VP8_SPLITMVMODE_4x4;
822  }
823 
824  num = vp8_mbsplit_count[part_idx];
825  mbsplits_cur = vp8_mbsplits[part_idx],
826  firstidx = vp8_mbfirstidx[part_idx];
827  mb->partitioning = part_idx;
828 
829  for (n = 0; n < num; n++) {
830  int k = firstidx[n];
831  uint32_t left, above;
832  const uint8_t *submv_prob;
833 
834  if (!(k & 3))
835  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
836  else
837  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
838  if (k <= 3)
839  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
840  else
841  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
842 
843  submv_prob = get_submv_prob(left, above, is_vp7);
844 
845  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
846  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
847  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
848  mb->bmv[n].y = mb->mv.y +
849  read_mv_component(c, s->prob->mvc[0], is_vp7);
850  mb->bmv[n].x = mb->mv.x +
851  read_mv_component(c, s->prob->mvc[1], is_vp7);
852  } else {
853  AV_ZERO32(&mb->bmv[n]);
854  }
855  } else {
856  AV_WN32A(&mb->bmv[n], above);
857  }
858  } else {
859  AV_WN32A(&mb->bmv[n], left);
860  }
861  }
862 
863  return num;
864 }
865 
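/* VP7 MV prediction addresses neighbouring macroblocks on a virtual plane that is one
 * macroblock wider than the frame; candidate offsets that fall before 'boundary' or
 * into that padding column are rejected (return 0). */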
877 static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
878  int xoffset, int yoffset, int boundary,
879  int *edge_x, int *edge_y)
880 {
881  int vwidth = mb_width + 1;
882  int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
883  if (new < boundary || new % vwidth == vwidth - 1)
884  return 0;
885  *edge_y = new / vwidth;
886  *edge_x = new % vwidth;
887  return 1;
888 }
889 
890 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
891 {
892  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
893 }
894 
895 static av_always_inline
896 void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
897  int mb_x, int mb_y, int layout)
898 {
899  VP8Macroblock *mb_edge[12];
900  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
901  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
902  int idx = CNT_ZERO;
903  VP56mv near_mv[3];
904  uint8_t cnt[3] = { 0 };
905  VP56RangeCoder *c = &s->c;
906  int i;
907 
908  AV_ZERO32(&near_mv[0]);
909  AV_ZERO32(&near_mv[1]);
910  AV_ZERO32(&near_mv[2]);
911 
912  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
913  const VP7MVPred * pred = &vp7_mv_pred[i];
914  int edge_x, edge_y;
915 
916  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
917  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
918  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
919  ? s->macroblocks_base + 1 + edge_x +
920  (s->mb_width + 1) * (edge_y + 1)
921  : s->macroblocks + edge_x +
922  (s->mb_height - edge_y - 1) * 2;
923  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
924  if (mv) {
925  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
926  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
927  idx = CNT_NEAREST;
928  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
929  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
930  continue;
931  idx = CNT_NEAR;
932  } else {
933  AV_WN32A(&near_mv[CNT_NEAR], mv);
934  idx = CNT_NEAR;
935  }
936  } else {
937  AV_WN32A(&near_mv[CNT_NEAREST], mv);
938  idx = CNT_NEAREST;
939  }
940  } else {
941  idx = CNT_ZERO;
942  }
943  } else {
944  idx = CNT_ZERO;
945  }
946  cnt[idx] += vp7_mv_pred[i].score;
947  }
948 
949  mb->partitioning = VP8_SPLITMVMODE_NONE;
950 
951  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
952  mb->mode = VP8_MVMODE_MV;
953 
954  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
955 
956  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
957 
958  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
959  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
960  else
961  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
962 
963  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
964  mb->mode = VP8_MVMODE_SPLIT;
965  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
966  } else {
967  mb->mv.y += read_mv_component(c, s->prob->mvc[0], IS_VP7);
968  mb->mv.x += read_mv_component(c, s->prob->mvc[1], IS_VP7);
969  mb->bmv[0] = mb->mv;
970  }
971  } else {
972  mb->mv = near_mv[CNT_NEAR];
973  mb->bmv[0] = mb->mv;
974  }
975  } else {
976  mb->mv = near_mv[CNT_NEAREST];
977  mb->bmv[0] = mb->mv;
978  }
979  } else {
980  mb->mode = VP8_MVMODE_ZERO;
981  AV_ZERO32(&mb->mv);
982  mb->bmv[0] = mb->mv;
983  }
984 }
985 
986 static av_always_inline
987 void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb,
988  int mb_x, int mb_y, int layout)
989 {
990  VP8Macroblock *mb_edge[3] = { 0 /* top */,
991  mb - 1 /* left */,
992  0 /* top-left */ };
993  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
994  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
995  int idx = CNT_ZERO;
996  int cur_sign_bias = s->sign_bias[mb->ref_frame];
997  int8_t *sign_bias = s->sign_bias;
998  VP56mv near_mv[4];
999  uint8_t cnt[4] = { 0 };
1000  VP56RangeCoder *c = &s->c;
1001 
1002  if (!layout) { // layout is inlined (s->mb_layout is not)
1003  mb_edge[0] = mb + 2;
1004  mb_edge[2] = mb + 1;
1005  } else {
1006  mb_edge[0] = mb - s->mb_width - 1;
1007  mb_edge[2] = mb - s->mb_width - 2;
1008  }
1009 
1010  AV_ZERO32(&near_mv[0]);
1011  AV_ZERO32(&near_mv[1]);
1012  AV_ZERO32(&near_mv[2]);
1013 
1014  /* Process MB on top, left and top-left */
1015 #define MV_EDGE_CHECK(n) \
1016  { \
1017  VP8Macroblock *edge = mb_edge[n]; \
1018  int edge_ref = edge->ref_frame; \
1019  if (edge_ref != VP56_FRAME_CURRENT) { \
1020  uint32_t mv = AV_RN32A(&edge->mv); \
1021  if (mv) { \
1022  if (cur_sign_bias != sign_bias[edge_ref]) { \
1023  /* SWAR negate of the values in mv. */ \
1024  mv = ~mv; \
1025  mv = ((mv & 0x7fff7fff) + \
1026  0x00010001) ^ (mv & 0x80008000); \
1027  } \
1028  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1029  AV_WN32A(&near_mv[++idx], mv); \
1030  cnt[idx] += 1 + (n != 2); \
1031  } else \
1032  cnt[CNT_ZERO] += 1 + (n != 2); \
1033  } \
1034  }
1035 
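 /* worked example of the SWAR negate in MV_EDGE_CHECK (little-endian): mv = 0xFFFF0001
  * holds x = +1, y = -1 and becomes 0x0001FFFF, i.e. x = -1, y = +1, negating both
  * 16-bit halves in one go */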
1036  MV_EDGE_CHECK(0)
1037  MV_EDGE_CHECK(1)
1038  MV_EDGE_CHECK(2)
1039 
1040  mb->partitioning = VP8_SPLITMVMODE_NONE;
1041  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1042  mb->mode = VP8_MVMODE_MV;
1043 
1044  /* If we have three distinct MVs, merge first and last if they're the same */
1045  if (cnt[CNT_SPLITMV] &&
1046  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1047  cnt[CNT_NEAREST] += 1;
1048 
1049  /* Swap near and nearest if necessary */
1050  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1051  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1052  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1053  }
1054 
1055  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1056  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1057  /* Choose the best mv out of 0,0 and the nearest mv */
1058  clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1059  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1060  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1061  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1062 
1063  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1064  mb->mode = VP8_MVMODE_SPLIT;
1065  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1066  } else {
1067  mb->mv.y += read_mv_component(c, s->prob->mvc[0], IS_VP8);
1068  mb->mv.x += read_mv_component(c, s->prob->mvc[1], IS_VP8);
1069  mb->bmv[0] = mb->mv;
1070  }
1071  } else {
1072  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
1073  mb->bmv[0] = mb->mv;
1074  }
1075  } else {
1076  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
1077  mb->bmv[0] = mb->mv;
1078  }
1079  } else {
1080  mb->mode = VP8_MVMODE_ZERO;
1081  AV_ZERO32(&mb->mv);
1082  mb->bmv[0] = mb->mv;
1083  }
1084 }
1085 
1086 static av_always_inline
1087 void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
1088  int mb_x, int keyframe, int layout)
1089 {
1090  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1091 
1092  if (layout == 1) {
1093  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1094  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1095  }
1096  if (keyframe) {
1097  int x, y;
1098  uint8_t *top;
1099  uint8_t *const left = s->intra4x4_pred_mode_left;
1100  if (layout == 1)
1101  top = mb->intra4x4_pred_mode_top;
1102  else
1103  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1104  for (y = 0; y < 4; y++) {
1105  for (x = 0; x < 4; x++) {
1106  const uint8_t *ctx;
1107  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1108  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1109  left[y] = top[x] = *intra4x4;
1110  intra4x4++;
1111  }
1112  }
1113  } else {
1114  int i;
1115  for (i = 0; i < 16; i++)
1116  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1117  s->prob->pred4x4);
1118  }
1119 }
1120 
1121 static av_always_inline
1122 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1123  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1124 {
1125  VP56RangeCoder *c = &s->c;
1126  const char *vp7_feature_name[] = { "q-index",
1127  "lf-delta",
1128  "partial-golden-update",
1129  "blit-pitch" };
1130  if (is_vp7) {
1131  int i;
1132  *segment = 0;
1133  for (i = 0; i < 4; i++) {
1134  if (s->feature_enabled[i]) {
1135  if (vp56_rac_get_prob(c, s->feature_present_prob[i])) {
1136  int index = vp8_rac_get_tree(c, vp7_feature_index_tree,
1137  s->feature_index_prob[i]);
1138  av_log(s->avctx, AV_LOG_WARNING,
1139  "Feature %s present in macroblock (value 0x%x)\n",
1140  vp7_feature_name[i], s->feature_value[i][index]);
1141  }
1142  }
1143  }
1144  } else if (s->segmentation.update_map)
1145  *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
1146  else if (s->segmentation.enabled)
1147  *segment = ref ? *ref : *segment;
1148  mb->segment = *segment;
1149 
1150  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1151 
1152  if (s->keyframe) {
1153  mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra,
1154  vp8_pred16x16_prob_intra);
1155 
1156  if (mb->mode == MODE_I4x4) {
1157  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1158  } else {
1159  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1160  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1161  if (s->mb_layout == 1)
1162  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1163  else
1164  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1165  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1166  }
1167 
1168  mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
1169  vp8_pred8x8c_prob_intra);
1170  mb->ref_frame = VP56_FRAME_CURRENT;
1171  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1172  // inter MB, 16.2
1173  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1174  mb->ref_frame =
1175  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1176  : VP56_FRAME_GOLDEN;
1177  else
1178  mb->ref_frame = VP56_FRAME_PREVIOUS;
1179  s->ref_count[mb->ref_frame - 1]++;
1180 
1181  // motion vectors, 16.3
1182  if (is_vp7)
1183  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1184  else
1185  vp8_decode_mvs(s, mb, mb_x, mb_y, layout);
1186  } else {
1187  // intra MB, 16.1
1188  mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
1189 
1190  if (mb->mode == MODE_I4x4)
1191  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1192 
1193  mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
1194  s->prob->pred8x8c);
1195  mb->ref_frame = VP56_FRAME_CURRENT;
1196  mb->partitioning = VP8_SPLITMVMODE_NONE;
1197  AV_ZERO32(&mb->bmv[0]);
1198  }
1199 }
1200 
1211 static av_always_inline
1212 int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16],
1213  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1214  int i, uint8_t *token_prob, int16_t qmul[2],
1215  const uint8_t scan[16], int vp7)
1216 {
1217  VP56RangeCoder c = *r;
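 /* works on a local copy of the range coder (written back on return); coefficients are
  * dequantized with qmul[0] for DC and qmul[1] for AC and stored in scan order, and the
  * return value is (roughly) one past the index of the last decoded coefficient */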
1218  goto skip_eob;
1219  do {
1220  int coeff;
1221 restart:
1222  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1223  break;
1224 
1225 skip_eob:
1226  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1227  if (++i == 16)
1228  break; // invalid input; blocks should end with EOB
1229  token_prob = probs[i][0];
1230  if (vp7)
1231  goto restart;
1232  goto skip_eob;
1233  }
1234 
1235  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1236  coeff = 1;
1237  token_prob = probs[i + 1][1];
1238  } else {
1239  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1240  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1241  if (coeff)
1242  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1243  coeff += 2;
1244  } else {
1245  // DCT_CAT*
1246  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1247  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1248  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1249  } else { // DCT_CAT2
1250  coeff = 7;
1251  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1252  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1253  }
1254  } else { // DCT_CAT3 and up
1255  int a = vp56_rac_get_prob(&c, token_prob[8]);
1256  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1257  int cat = (a << 1) + b;
1258  coeff = 3 + (8 << cat);
1259  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1260  }
1261  }
1262  token_prob = probs[i + 1][2];
1263  }
1264  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1265  } while (++i < 16);
1266 
1267  *r = c;
1268  return i;
1269 }
1270 
1271 static av_always_inline
1272 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1273 {
1274  int16_t dc = block[0];
1275  int ret = 0;
1276 
1277  if (pred[1] > 3) {
1278  dc += pred[0];
1279  ret = 1;
1280  }
1281 
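 /* branchless test: true if either DC value is zero or the two values have opposite
  * signs (the arithmetic shift extracts the sign bit of the XOR) */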
1282  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1283  block[0] = pred[0] = dc;
1284  pred[1] = 0;
1285  } else {
1286  if (pred[0] == dc)
1287  pred[1]++;
1288  block[0] = pred[0] = dc;
1289  }
1290 
1291  return ret;
1292 }
1293 
1294 static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r,
1295  int16_t block[16],
1296  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1297  int i, uint8_t *token_prob,
1298  int16_t qmul[2],
1299  const uint8_t scan[16])
1300 {
1301  return decode_block_coeffs_internal(r, block, probs, i,
1302  token_prob, qmul, scan, IS_VP7);
1303 }
1304 
1305 #ifndef vp8_decode_block_coeffs_internal
1306 static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r,
1307  int16_t block[16],
1308  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1309  int i, uint8_t *token_prob,
1310  int16_t qmul[2])
1311 {
1312  return decode_block_coeffs_internal(r, block, probs, i,
1313  token_prob, qmul, zigzag_scan, IS_VP8);
1314 }
1315 #endif
1316 
1329 static av_always_inline
1330 int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16],
1331  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1332  int i, int zero_nhood, int16_t qmul[2],
1333  const uint8_t scan[16], int vp7)
1334 {
1335  uint8_t *token_prob = probs[i][zero_nhood];
1336  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1337  return 0;
1338  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1339  token_prob, qmul, scan)
1340  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1341  token_prob, qmul);
1342 }
1343 
1344 static av_always_inline
1345 void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c,
1346  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1347  int is_vp7)
1348 {
1349  int i, x, y, luma_start = 0, luma_ctx = 3;
1350  int nnz_pred, nnz, nnz_total = 0;
1351  int segment = mb->segment;
1352  int block_dc = 0;
1353 
1354  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1355  nnz_pred = t_nnz[8] + l_nnz[8];
1356 
1357  // decode DC values and do hadamard
1358  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1359  nnz_pred, s->qmat[segment].luma_dc_qmul,
1360  zigzag_scan, is_vp7);
1361  l_nnz[8] = t_nnz[8] = !!nnz;
1362 
1363  if (is_vp7 && mb->mode > MODE_I4x4) {
1364  nnz |= inter_predict_dc(td->block_dc,
1365  s->inter_dc_pred[mb->ref_frame - 1]);
1366  }
1367 
1368  if (nnz) {
1369  nnz_total += nnz;
1370  block_dc = 1;
1371  if (nnz == 1)
1372  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1373  else
1374  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1375  }
1376  luma_start = 1;
1377  luma_ctx = 0;
1378  }
1379 
1380  // luma blocks
1381  for (y = 0; y < 4; y++)
1382  for (x = 0; x < 4; x++) {
1383  nnz_pred = l_nnz[y] + t_nnz[x];
1384  nnz = decode_block_coeffs(c, td->block[y][x],
1385  s->prob->token[luma_ctx],
1386  luma_start, nnz_pred,
1387  s->qmat[segment].luma_qmul,
1388  s->prob[0].scan, is_vp7);
1389  /* nnz+block_dc may be one more than the actual last index,
1390  * but we don't care */
1391  td->non_zero_count_cache[y][x] = nnz + block_dc;
1392  t_nnz[x] = l_nnz[y] = !!nnz;
1393  nnz_total += nnz;
1394  }
1395 
1396  // chroma blocks
1397  // TODO: what to do about dimensions? 2nd dim for luma is x,
1398  // but for chroma it's (y<<1)|x
1399  for (i = 4; i < 6; i++)
1400  for (y = 0; y < 2; y++)
1401  for (x = 0; x < 2; x++) {
1402  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1403  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1404  s->prob->token[2], 0, nnz_pred,
1405  s->qmat[segment].chroma_qmul,
1406  s->prob[0].scan, is_vp7);
1407  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1408  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1409  nnz_total += nnz;
1410  }
1411 
1412  // if there were no coded coeffs despite the macroblock not being marked skip,
1413  // we MUST not do the inner loop filter and should not do IDCT
1414  // Since skip isn't used for bitstream prediction, just manually set it.
1415  if (!nnz_total)
1416  mb->skip = 1;
1417 }
1418 
1419 static av_always_inline
1420 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1421  uint8_t *src_cb, uint8_t *src_cr,
1422  int linesize, int uvlinesize, int simple)
1423 {
1424  AV_COPY128(top_border, src_y + 15 * linesize);
1425  if (!simple) {
1426  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1427  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1428  }
1429 }
1430 
1431 static av_always_inline
1432 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1433  uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
1434  int mb_y, int mb_width, int simple, int xchg)
1435 {
1436  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
1437  src_y -= linesize;
1438  src_cb -= uvlinesize;
1439  src_cr -= uvlinesize;
1440 
1441 #define XCHG(a, b, xchg) \
1442  do { \
1443  if (xchg) \
1444  AV_SWAP64(b, a); \
1445  else \
1446  AV_COPY64(b, a); \
1447  } while (0)
1448 
1449  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1450  XCHG(top_border, src_y, xchg);
1451  XCHG(top_border + 8, src_y + 8, 1);
1452  if (mb_x < mb_width - 1)
1453  XCHG(top_border + 32, src_y + 16, 1);
1454 
1455  // only copy chroma for normal loop filter
1456  // or to initialize the top row to 127
1457  if (!simple || !mb_y) {
1458  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1459  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1460  XCHG(top_border + 16, src_cb, 1);
1461  XCHG(top_border + 24, src_cr, 1);
1462  }
1463 }
1464 
1465 static av_always_inline
1466 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1467 {
1468  if (!mb_x)
1469  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1470  else
1471  return mb_y ? mode : LEFT_DC_PRED8x8;
1472 }
1473 
1474 static av_always_inline
1475 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1476 {
1477  if (!mb_x)
1478  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1479  else
1480  return mb_y ? mode : HOR_PRED8x8;
1481 }
1482 
1483 static av_always_inline
1484 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1485 {
1486  switch (mode) {
1487  case DC_PRED8x8:
1488  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1489  case VERT_PRED8x8:
1490  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1491  case HOR_PRED8x8:
1492  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1493  case PLANE_PRED8x8: /* TM */
1494  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1495  }
1496  return mode;
1497 }
1498 
1499 static av_always_inline
1500 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1501 {
1502  if (!mb_x) {
1503  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1504  } else {
1505  return mb_y ? mode : HOR_VP8_PRED;
1506  }
1507 }
1508 
1509 static av_always_inline
1510 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1511  int *copy_buf, int vp7)
1512 {
1513  switch (mode) {
1514  case VERT_PRED:
1515  if (!mb_x && mb_y) {
1516  *copy_buf = 1;
1517  return mode;
1518  }
1519  /* fall-through */
1520  case DIAG_DOWN_LEFT_PRED:
1521  case VERT_LEFT_PRED:
1522  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1523  case HOR_PRED:
1524  if (!mb_y) {
1525  *copy_buf = 1;
1526  return mode;
1527  }
1528  /* fall-through */
1529  case HOR_UP_PRED:
1530  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1531  case TM_VP8_PRED:
1532  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1533  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1534  * as 16x16/8x8 DC */
1535  case DIAG_DOWN_RIGHT_PRED:
1536  case VERT_RIGHT_PRED:
1537  case HOR_DOWN_PRED:
1538  if (!mb_y || !mb_x)
1539  *copy_buf = 1;
1540  return mode;
1541  }
1542  return mode;
1543 }
1544 
1545 static av_always_inline
1546 void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
1547  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1548 {
1549  int x, y, mode, nnz;
1550  uint32_t tr;
1551 
1552  /* for the first row, we need to run xchg_mb_border to init the top edge
1553  * to 127; otherwise, skip it if we aren't going to deblock */
1554  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1555  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1556  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1557  s->filter.simple, 1);
1558 
1559  if (mb->mode < MODE_I4x4) {
1560  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1561  s->hpc.pred16x16[mode](dst[0], s->linesize);
1562  } else {
1563  uint8_t *ptr = dst[0];
1564  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1565  const uint8_t lo = is_vp7 ? 128 : 127;
1566  const uint8_t hi = is_vp7 ? 128 : 129;
1567  uint8_t tr_top[4] = { lo, lo, lo, lo };
1568 
1569  // all blocks on the right edge of the macroblock use the bottom edge of
1570  // the top macroblock for their topright edge
1571  uint8_t *tr_right = ptr - s->linesize + 16;
1572 
1573  // if we're on the right edge of the frame, said edge is extended
1574  // from the top macroblock
1575  if (mb_y && mb_x == s->mb_width - 1) {
1576  tr = tr_right[-1] * 0x01010101u;
1577  tr_right = (uint8_t *) &tr;
1578  }
1579 
1580  if (mb->skip)
1581  AV_ZERO128(td->non_zero_count_cache);
1582 
1583  for (y = 0; y < 4; y++) {
1584  uint8_t *topright = ptr + 4 - s->linesize;
1585  for (x = 0; x < 4; x++) {
1586  int copy = 0, linesize = s->linesize;
1587  uint8_t *dst = ptr + 4 * x;
1588  DECLARE_ALIGNED(4, uint8_t, copy_dst)[5 * 8];
1589 
1590  if ((y == 0 || x == 3) && mb_y == 0) {
1591  topright = tr_top;
1592  } else if (x == 3)
1593  topright = tr_right;
1594 
1595  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1596  mb_y + y, &copy, is_vp7);
1597  if (copy) {
1598  dst = copy_dst + 12;
1599  linesize = 8;
1600  if (!(mb_y + y)) {
1601  copy_dst[3] = lo;
1602  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1603  } else {
1604  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1605  if (!(mb_x + x)) {
1606  copy_dst[3] = hi;
1607  } else {
1608  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1609  }
1610  }
1611  if (!(mb_x + x)) {
1612  copy_dst[11] =
1613  copy_dst[19] =
1614  copy_dst[27] =
1615  copy_dst[35] = hi;
1616  } else {
1617  copy_dst[11] = ptr[4 * x - 1];
1618  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1619  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1620  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1621  }
1622  }
1623  s->hpc.pred4x4[mode](dst, topright, linesize);
1624  if (copy) {
1625  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1626  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1627  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1628  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1629  }
1630 
1631  nnz = td->non_zero_count_cache[y][x];
1632  if (nnz) {
1633  if (nnz == 1)
1634  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1635  td->block[y][x], s->linesize);
1636  else
1637  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1638  td->block[y][x], s->linesize);
1639  }
1640  topright += 4;
1641  }
1642 
1643  ptr += 4 * s->linesize;
1644  intra4x4 += 4;
1645  }
1646  }
1647 
1648  mode = check_intra_pred8x8_mode_emuedge(mb->chroma_pred_mode,
1649  mb_x, mb_y, is_vp7);
1650  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1651  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1652 
1653  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1654  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1655  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1656  s->filter.simple, 0);
1657 }
1658 
1659 static const uint8_t subpel_idx[3][8] = {
1660  { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
1661  // also function pointer index
1662  { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
1663  { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
1664 };
1665 
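/* Luma motion compensation for one block: mv is in quarter-pel units here (x_off/y_off
 * give the block position, mc_func selects the subpel filter); the edge is emulated
 * when the referenced area reaches outside the reference frame. */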
1682 static av_always_inline
1683 void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
1684  ThreadFrame *ref, const VP56mv *mv,
1685  int x_off, int y_off, int block_w, int block_h,
1686  int width, int height, ptrdiff_t linesize,
1687  vp8_mc_func mc_func[3][3])
1688 {
1689  uint8_t *src = ref->f->data[0];
1690 
1691  if (AV_RN32A(mv)) {
1692  int src_linesize = linesize;
1693 
1694  int mx = (mv->x << 1) & 7, mx_idx = subpel_idx[0][mx];
1695  int my = (mv->y << 1) & 7, my_idx = subpel_idx[0][my];
1696 
1697  x_off += mv->x >> 2;
1698  y_off += mv->y >> 2;
1699 
1700  // edge emulation
1701  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1702  src += y_off * linesize + x_off;
1703  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1704  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1705  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
1706  src - my_idx * linesize - mx_idx,
1707  EDGE_EMU_LINESIZE, linesize,
1708  block_w + subpel_idx[1][mx],
1709  block_h + subpel_idx[1][my],
1710  x_off - mx_idx, y_off - my_idx,
1711  width, height);
1712  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1713  src_linesize = EDGE_EMU_LINESIZE;
1714  }
1715  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1716  } else {
1717  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1718  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1719  linesize, block_h, 0, 0);
1720  }
1721 }
1722 
1740 static av_always_inline
1741 void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
1742  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1743  int x_off, int y_off, int block_w, int block_h,
1744  int width, int height, ptrdiff_t linesize,
1745  vp8_mc_func mc_func[3][3])
1746 {
1747  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1748 
1749  if (AV_RN32A(mv)) {
1750  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1751  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1752 
1753  x_off += mv->x >> 3;
1754  y_off += mv->y >> 3;
1755 
1756  // edge emulation
1757  src1 += y_off * linesize + x_off;
1758  src2 += y_off * linesize + x_off;
1759  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1760  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1761  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1762  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
1763  src1 - my_idx * linesize - mx_idx,
1764  EDGE_EMU_LINESIZE, linesize,
1765  block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1766  x_off - mx_idx, y_off - my_idx, width, height);
1767  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1768  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1769 
1770  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
1771  src2 - my_idx * linesize - mx_idx,
1772  EDGE_EMU_LINESIZE, linesize,
1773  block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1774  x_off - mx_idx, y_off - my_idx, width, height);
1775  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1776  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1777  } else {
1778  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1779  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1780  }
1781  } else {
1782  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1783  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1784  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1785  }
1786 }
1787 
1788 static av_always_inline
1789 void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
1790  ThreadFrame *ref_frame, int x_off, int y_off,
1791  int bx_off, int by_off, int block_w, int block_h,
1792  int width, int height, VP56mv *mv)
1793 {
1794  VP56mv uvmv = *mv;
1795 
1796  /* Y */
1797  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1798  ref_frame, mv, x_off + bx_off, y_off + by_off,
1799  block_w, block_h, width, height, s->linesize,
1800  s->put_pixels_tab[block_w == 8]);
1801 
1802  /* U/V */
1803  if (s->profile == 3) {
1804  /* this block only applies to VP8; it is safe to check
1805  * only the profile, as VP7's profile is always <= 1 */
1806  uvmv.x &= ~7;
1807  uvmv.y &= ~7;
1808  }
1809  x_off >>= 1;
1810  y_off >>= 1;
1811  bx_off >>= 1;
1812  by_off >>= 1;
1813  width >>= 1;
1814  height >>= 1;
1815  block_w >>= 1;
1816  block_h >>= 1;
1817  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1818  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1819  &uvmv, x_off + bx_off, y_off + by_off,
1820  block_w, block_h, width, height, s->uvlinesize,
1821  s->put_pixels_tab[1 + (block_w == 4)]);
1822 }
1823 
1824 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1825  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1826 static av_always_inline
1827 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1828  int mb_xy, int ref)
1829 {
1830  /* Don't prefetch refs that haven't been used very often this frame. */
1831  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1832  int x_off = mb_x << 4, y_off = mb_y << 4;
1833  int mx = (mb->mv.x >> 2) + x_off + 8;
1834  int my = (mb->mv.y >> 2) + y_off;
1835  uint8_t **src = s->framep[ref]->tf.f->data;
1836  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1837  /* For threading, a ff_thread_await_progress here might be useful, but
1838  * it actually slows down the decoder. Since a bad prefetch doesn't
1839  * generate bad decoder output, we don't run it here. */
1840  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1841  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1842  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1843  }
1844 }
1845 
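/* Apply the motion vectors to the prediction buffer (inter prediction, spec chapter 18),
 * dispatching on the macroblock partitioning mode. */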
1849 static av_always_inline
1850 void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
1851  VP8Macroblock *mb, int mb_x, int mb_y)
1852 {
1853  int x_off = mb_x << 4, y_off = mb_y << 4;
1854  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1855  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1856  VP56mv *bmv = mb->bmv;
1857 
1858  switch (mb->partitioning) {
1859  case VP8_SPLITMVMODE_NONE:
1860  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1861  0, 0, 16, 16, width, height, &mb->mv);
1862  break;
1863  case VP8_SPLITMVMODE_4x4: {
1864  int x, y;
1865  VP56mv uvmv;
1866 
1867  /* Y */
1868  for (y = 0; y < 4; y++) {
1869  for (x = 0; x < 4; x++) {
1870  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1871  ref, &bmv[4 * y + x],
1872  4 * x + x_off, 4 * y + y_off, 4, 4,
1873  width, height, s->linesize,
1874  s->put_pixels_tab[2]);
1875  }
1876  }
1877 
1878  /* U/V */
1879  x_off >>= 1;
1880  y_off >>= 1;
1881  width >>= 1;
1882  height >>= 1;
1883  for (y = 0; y < 2; y++) {
1884  for (x = 0; x < 2; x++) {
1885  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1886  mb->bmv[2 * y * 4 + 2 * x + 1].x +
1887  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1888  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1889  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1890  mb->bmv[2 * y * 4 + 2 * x + 1].y +
1891  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1892  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1893  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
1894  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
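 /* average of the four luma MVs: FF_SIGNBIT() evaluates to -1 for negative sums, so
  * adding 2 and shifting divides by 4 with round-half-away-from-zero */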
1895  if (s->profile == 3) {
1896  uvmv.x &= ~7;
1897  uvmv.y &= ~7;
1898  }
1899  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
1900  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
1901  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1902  width, height, s->uvlinesize,
1903  s->put_pixels_tab[2]);
1904  }
1905  }
1906  break;
1907  }
1908  case VP8_SPLITMVMODE_16x8:
1909  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1910  0, 0, 16, 8, width, height, &bmv[0]);
1911  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1912  0, 8, 16, 8, width, height, &bmv[1]);
1913  break;
1914  case VP8_SPLITMVMODE_8x16:
1915  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1916  0, 0, 8, 16, width, height, &bmv[0]);
1917  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1918  8, 0, 8, 16, width, height, &bmv[1]);
1919  break;
1920  case VP8_SPLITMVMODE_8x8:
1921  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1922  0, 0, 8, 8, width, height, &bmv[0]);
1923  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1924  8, 0, 8, 8, width, height, &bmv[1]);
1925  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1926  0, 8, 8, 8, width, height, &bmv[2]);
1927  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1928  8, 8, 8, 8, width, height, &bmv[3]);
1929  break;
1930  }
1931 }
1932 
1933 static av_always_inline
1934 void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
1935 {
1936  int x, y, ch;
1937 
1938  if (mb->mode != MODE_I4x4) {
1939  uint8_t *y_dst = dst[0];
1940  for (y = 0; y < 4; y++) {
1941  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
1942  if (nnz4) {
1943  if (nnz4 & ~0x01010101) {
1944  for (x = 0; x < 4; x++) {
1945  if ((uint8_t) nnz4 == 1)
1946  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
1947  td->block[y][x],
1948  s->linesize);
1949  else if ((uint8_t) nnz4 > 1)
1950  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
1951  td->block[y][x],
1952  s->linesize);
1953  nnz4 >>= 8;
1954  if (!nnz4)
1955  break;
1956  }
1957  } else {
1958  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
1959  }
1960  }
1961  y_dst += 4 * s->linesize;
1962  }
1963  }
1964 
1965  for (ch = 0; ch < 2; ch++) {
1966  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
1967  if (nnz4) {
1968  uint8_t *ch_dst = dst[1 + ch];
1969  if (nnz4 & ~0x01010101) {
1970  for (y = 0; y < 2; y++) {
1971  for (x = 0; x < 2; x++) {
1972  if ((uint8_t) nnz4 == 1)
1973  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
1974  td->block[4 + ch][(y << 1) + x],
1975  s->uvlinesize);
1976  else if ((uint8_t) nnz4 > 1)
1977  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
1978  td->block[4 + ch][(y << 1) + x],
1979  s->uvlinesize);
1980  nnz4 >>= 8;
1981  if (!nnz4)
1982  goto chroma_idct_end;
1983  }
1984  ch_dst += 4 * s->uvlinesize;
1985  }
1986  } else {
1987  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
1988  }
1989  }
1990 chroma_idct_end:
1991  ;
1992  }
1993 }
1994 
1995 static av_always_inline
1996 void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb,
1997  VP8FilterStrength *f, int is_vp7)
1998 {
1999  int interior_limit, filter_level;
2000 
2001  if (s->segmentation.enabled) {
2002  filter_level = s->segmentation.filter_level[mb->segment];
2003  if (!s->segmentation.absolute_vals)
2004  filter_level += s->filter.level;
2005  } else
2006  filter_level = s->filter.level;
2007 
2008  if (s->lf_delta.enabled) {
2009  filter_level += s->lf_delta.ref[mb->ref_frame];
2010  filter_level += s->lf_delta.mode[mb->mode];
2011  }
2012 
2013  filter_level = av_clip_uintp2(filter_level, 6);
2014 
2015  interior_limit = filter_level;
2016  if (s->filter.sharpness) {
2017  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2018  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2019  }
2020  interior_limit = FFMAX(interior_limit, 1);
2021 
2022  f->filter_level = filter_level;
2023  f->inner_limit = interior_limit;
2024  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2025  mb->mode == VP8_MVMODE_SPLIT;
2026 }
2027 
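/* Worked example (not part of the original source) for filter_level_for_mb()
 * above, assuming filter.level = 40, filter.sharpness = 5 and no segmentation
 * or loop-filter deltas:
 *   filter_level   = av_clip_uintp2(40, 6)           = 40
 *   interior_limit = 40 >> ((5 + 3) >> 2) = 40 >> 2  = 10
 *   interior_limit = FFMIN(10, 9 - 5)                = 4
 *   interior_limit = FFMAX(4, 1)                     = 4
 * i.e. higher sharpness tightens the inner edge limit while leaving the
 * macroblock-edge filter strength itself unchanged. */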
2028 static av_always_inline
2029 void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f,
2030  int mb_x, int mb_y, int is_vp7)
2031 {
2032  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2033  int filter_level = f->filter_level;
2034  int inner_limit = f->inner_limit;
2035  int inner_filter = f->inner_filter;
2036  int linesize = s->linesize;
2037  int uvlinesize = s->uvlinesize;
2038  static const uint8_t hev_thresh_lut[2][64] = {
2039  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2040  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2041  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2042  3, 3, 3, 3 },
2043  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2044  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2045  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2046  2, 2, 2, 2 }
2047  };
2048 
2049  if (!filter_level)
2050  return;
2051 
2052  if (is_vp7) {
2053  bedge_lim_y = filter_level;
2054  bedge_lim_uv = filter_level * 2;
2055  mbedge_lim = filter_level + 2;
2056  } else {
2057  bedge_lim_y =
2058  bedge_lim_uv = filter_level * 2 + inner_limit;
2059  mbedge_lim = bedge_lim_y + 4;
2060  }
2061 
2062  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2063 
2064  if (mb_x) {
2065  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2066  mbedge_lim, inner_limit, hev_thresh);
2067  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2068  mbedge_lim, inner_limit, hev_thresh);
2069  }
2070 
2071 #define H_LOOP_FILTER_16Y_INNER(cond) \
2072  if (cond && inner_filter) { \
2073  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2074  bedge_lim_y, inner_limit, \
2075  hev_thresh); \
2076  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2077  bedge_lim_y, inner_limit, \
2078  hev_thresh); \
2079  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2080  bedge_lim_y, inner_limit, \
2081  hev_thresh); \
2082  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2083  uvlinesize, bedge_lim_uv, \
2084  inner_limit, hev_thresh); \
2085  }
2086 
2087  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2088 
2089  if (mb_y) {
2090  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2091  mbedge_lim, inner_limit, hev_thresh);
2092  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2093  mbedge_lim, inner_limit, hev_thresh);
2094  }
2095 
2096  if (inner_filter) {
2097  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2098  linesize, bedge_lim_y,
2099  inner_limit, hev_thresh);
2100  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2101  linesize, bedge_lim_y,
2102  inner_limit, hev_thresh);
2103  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2104  linesize, bedge_lim_y,
2105  inner_limit, hev_thresh);
2106  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2107  dst[2] + 4 * uvlinesize,
2108  uvlinesize, bedge_lim_uv,
2109  inner_limit, hev_thresh);
2110  }
2111 
2112  H_LOOP_FILTER_16Y_INNER(is_vp7)
2113 }
2114 
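/* Continuing the same example (not part of the original source) for
 * filter_mb() above, with filter_level = 40 and inner_limit = 4:
 *   VP8: bedge_lim_y = bedge_lim_uv = 2 * 40 + 4 = 84, mbedge_lim = 88
 *   VP7: bedge_lim_y = 40, bedge_lim_uv = 80,          mbedge_lim = 42
 * so macroblock edges are always filtered somewhat harder than the inner
 * 4x4 block edges. */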
2115 static av_always_inline
2116 void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f,
2117  int mb_x, int mb_y)
2118 {
2119  int mbedge_lim, bedge_lim;
2120  int filter_level = f->filter_level;
2121  int inner_limit = f->inner_limit;
2122  int inner_filter = f->inner_filter;
2123  int linesize = s->linesize;
2124 
2125  if (!filter_level)
2126  return;
2127 
2128  bedge_lim = 2 * filter_level + inner_limit;
2129  mbedge_lim = bedge_lim + 4;
2130 
2131  if (mb_x)
2132  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2133  if (inner_filter) {
2134  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2135  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2136  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2137  }
2138 
2139  if (mb_y)
2140  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2141  if (inner_filter) {
2142  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2143  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2144  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2145  }
2146 }
2147 
2148 #define MARGIN (16 << 2)
2149 static av_always_inline
2150 void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
2151  VP8Frame *prev_frame, int is_vp7)
2152 {
2153  VP8Context *s = avctx->priv_data;
2154  int mb_x, mb_y;
2155 
2156  s->mv_min.y = -MARGIN;
2157  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2158  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2159  VP8Macroblock *mb = s->macroblocks_base +
2160  ((s->mb_width + 1) * (mb_y + 1) + 1);
2161  int mb_xy = mb_y * s->mb_width;
2162 
2163  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2164 
2165  s->mv_min.x = -MARGIN;
2166  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2167  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2168  if (mb_y == 0)
2169  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2170  DC_PRED * 0x01010101);
2171  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2172  prev_frame && prev_frame->seg_map ?
2173  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
2174  s->mv_min.x -= 64;
2175  s->mv_max.x -= 64;
2176  }
2177  s->mv_min.y -= 64;
2178  s->mv_max.y -= 64;
2179  }
2180 }
2181 
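/* Note (not part of the original source): MARGIN is 16 << 2 = 64, i.e. one
 * 16-pixel macroblock expressed in the scaled units used by mv_min/mv_max
 * (macroblock index << 6).  The window starts one macroblock outside the
 * frame on each side and is slid by 64 per macroblock column and row, so
 * clamp_mv() always restricts motion vectors to the area the current block
 * is allowed to reference. */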
2182 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2183  VP8Frame *prev_frame)
2184 {
2185  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2186 }
2187 
2188 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2189  VP8Frame *prev_frame)
2190 {
2191  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2192 }
2193 
2194 #if HAVE_THREADS
2195 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2196  do { \
2197  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2198  if (otd->thread_mb_pos < tmp) { \
2199  pthread_mutex_lock(&otd->lock); \
2200  td->wait_mb_pos = tmp; \
2201  do { \
2202  if (otd->thread_mb_pos >= tmp) \
2203  break; \
2204  pthread_cond_wait(&otd->cond, &otd->lock); \
2205  } while (1); \
2206  td->wait_mb_pos = INT_MAX; \
2207  pthread_mutex_unlock(&otd->lock); \
2208  } \
2209  } while (0);
2210 
2211 #define update_pos(td, mb_y, mb_x) \
2212  do { \
2213  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2214  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2215  (num_jobs > 1); \
2216  int is_null = !next_td || !prev_td; \
2217  int pos_check = (is_null) ? 1 \
2218  : (next_td != td && \
2219  pos >= next_td->wait_mb_pos) || \
2220  (prev_td != td && \
2221  pos >= prev_td->wait_mb_pos); \
2222  td->thread_mb_pos = pos; \
2223  if (sliced_threading && pos_check) { \
2224  pthread_mutex_lock(&td->lock); \
2225  pthread_cond_broadcast(&td->cond); \
2226  pthread_mutex_unlock(&td->lock); \
2227  } \
2228  } while (0);
2229 #else
2230 #define check_thread_pos(td, otd, mb_x_check, mb_y_check)
2231 #define update_pos(td, mb_y, mb_x)
2232 #endif
2233 
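/* A minimal sketch (not part of the original source) of the progress token
 * used by check_thread_pos()/update_pos() above: row and column are packed
 * into one int that grows monotonically in decode order, so "has the other
 * thread reached row R, column C yet?" is a single integer comparison. */
static int mb_pos_token_sketch(int mb_y, int mb_x)
{
    return (mb_y << 16) | (mb_x & 0xFFFF);
}
/* e.g. mb_pos_token_sketch(3, 5) > mb_pos_token_sketch(2, 40): any position
 * on a later row compares greater than every position on an earlier row. */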
2234 static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2235  int jobnr, int threadnr, int is_vp7)
2236 {
2237  VP8Context *s = avctx->priv_data;
2238  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
2239  int mb_y = td->thread_mb_pos >> 16;
2240  int mb_x, mb_xy = mb_y * s->mb_width;
2241  int num_jobs = s->num_jobs;
2242  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
2243  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2244  VP8Macroblock *mb;
2245  uint8_t *dst[3] = {
2246  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2247  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2248  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2249  };
2250  if (mb_y == 0)
2251  prev_td = td;
2252  else
2253  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2254  if (mb_y == s->mb_height - 1)
2255  next_td = td;
2256  else
2257  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2258  if (s->mb_layout == 1)
2259  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2260  else {
2261  // Make sure the previous frame has read its segmentation map,
2262  // if we re-use the same map.
2263  if (prev_frame && s->segmentation.enabled &&
2264  !s->segmentation.update_map)
2265  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2266  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2267  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2268  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2269  }
2270 
2271  if (!is_vp7 || mb_y == 0)
2272  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2273 
2274  s->mv_min.x = -MARGIN;
2275  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2276 
2277  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2278  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2279  if (prev_td != td) {
2280  if (threadnr != 0) {
2281  check_thread_pos(td, prev_td,
2282  mb_x + (is_vp7 ? 2 : 1),
2283  mb_y - (is_vp7 ? 2 : 1));
2284  } else {
2285  check_thread_pos(td, prev_td,
2286  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2287  mb_y - (is_vp7 ? 2 : 1));
2288  }
2289  }
2290 
2291  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2292  s->linesize, 4);
2293  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2294  dst[2] - dst[1], 2);
2295 
2296  if (!s->mb_layout)
2297  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2298  prev_frame && prev_frame->seg_map ?
2299  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2300 
2301  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2302 
2303  if (!mb->skip)
2304  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2305 
2306  if (mb->mode <= MODE_I4x4)
2307  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2308  else
2309  inter_predict(s, td, dst, mb, mb_x, mb_y);
2310 
2311  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2312 
2313  if (!mb->skip) {
2314  idct_mb(s, td, dst, mb);
2315  } else {
2316  AV_ZERO64(td->left_nnz);
2317  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2318 
2319  /* Reset DC block predictors if they would exist
2320  * if the mb had coefficients */
2321  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2322  td->left_nnz[8] = 0;
2323  s->top_nnz[mb_x][8] = 0;
2324  }
2325  }
2326 
2327  if (s->deblock_filter)
2328  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2329 
2330  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2331  if (s->filter.simple)
2332  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2333  NULL, NULL, s->linesize, 0, 1);
2334  else
2335  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2336  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2337  }
2338 
2339  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2340 
2341  dst[0] += 16;
2342  dst[1] += 8;
2343  dst[2] += 8;
2344  s->mv_min.x -= 64;
2345  s->mv_max.x -= 64;
2346 
2347  if (mb_x == s->mb_width + 1) {
2348  update_pos(td, mb_y, s->mb_width + 3);
2349  } else {
2350  update_pos(td, mb_y, mb_x);
2351  }
2352  }
2353 }
2354 
2355 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2356  int jobnr, int threadnr, int is_vp7)
2357 {
2358  VP8Context *s = avctx->priv_data;
2359  VP8ThreadData *td = &s->thread_data[threadnr];
2360  int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
2361  AVFrame *curframe = s->curframe->tf.f;
2362  VP8Macroblock *mb;
2363  VP8ThreadData *prev_td, *next_td;
2364  uint8_t *dst[3] = {
2365  curframe->data[0] + 16 * mb_y * s->linesize,
2366  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2367  curframe->data[2] + 8 * mb_y * s->uvlinesize
2368  };
2369 
2370  if (s->mb_layout == 1)
2371  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2372  else
2373  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2374 
2375  if (mb_y == 0)
2376  prev_td = td;
2377  else
2378  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2379  if (mb_y == s->mb_height - 1)
2380  next_td = td;
2381  else
2382  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2383 
2384  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2385  VP8FilterStrength *f = &td->filter_strength[mb_x];
2386  if (prev_td != td)
2387  check_thread_pos(td, prev_td,
2388  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2389  if (next_td != td)
2390  if (next_td != &s->thread_data[0])
2391  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2392 
2393  if (num_jobs == 1) {
2394  if (s->filter.simple)
2395  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2396  NULL, NULL, s->linesize, 0, 1);
2397  else
2398  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2399  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2400  }
2401 
2402  if (s->filter.simple)
2403  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2404  else
2405  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2406  dst[0] += 16;
2407  dst[1] += 8;
2408  dst[2] += 8;
2409 
2410  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2411  }
2412 }
2413 
2414 static av_always_inline
2415 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2416  int threadnr, int is_vp7)
2417 {
2418  VP8Context *s = avctx->priv_data;
2419  VP8ThreadData *td = &s->thread_data[jobnr];
2420  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2421  VP8Frame *curframe = s->curframe;
2422  int mb_y, num_jobs = s->num_jobs;
2423 
2424  td->thread_nr = threadnr;
2425  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2426  if (mb_y >= s->mb_height)
2427  break;
2428  td->thread_mb_pos = mb_y << 16;
2429  vp8_decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, is_vp7);
2430  if (s->deblock_filter)
2431  vp8_filter_mb_row(avctx, tdata, jobnr, threadnr, is_vp7);
2432  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2433 
2434  s->mv_min.y -= 64;
2435  s->mv_max.y -= 64;
2436 
2437  if (avctx->active_thread_type == FF_THREAD_FRAME)
2438  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2439  }
2440 
2441  return 0;
2442 }
2443 
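/* Note (not part of the original source): with slice threading, job j of
 * vp78_decode_mb_row_sliced() above decodes rows j, j + num_jobs,
 * j + 2 * num_jobs, ...  For example, with num_jobs = 3 and 8 macroblock rows:
 *   job 0 -> rows 0, 3, 6
 *   job 1 -> rows 1, 4, 7
 *   job 2 -> rows 2, 5
 * check_thread_pos()/update_pos() then keep each row from running ahead of
 * the rows it depends on. */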
2444 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2445  int jobnr, int threadnr)
2446 {
2447  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2448 }
2449 
2450 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2451  int jobnr, int threadnr)
2452 {
2453  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2454 }
2455 
2456 
2457 static av_always_inline
2458 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2459  AVPacket *avpkt, int is_vp7)
2460 {
2461  VP8Context *s = avctx->priv_data;
2462  int ret, i, referenced, num_jobs;
2463  enum AVDiscard skip_thresh;
2464  VP8Frame *av_uninit(curframe), *prev_frame;
2465 
2466  if (is_vp7)
2467  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2468  else
2469  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2470 
2471  if (ret < 0)
2472  goto err;
2473 
2474  prev_frame = s->framep[VP56_FRAME_CURRENT];
2475 
2476  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2477  s->update_altref == VP56_FRAME_CURRENT;
2478 
2479  skip_thresh = !referenced ? AVDISCARD_NONREF
2480  : !s->keyframe ? AVDISCARD_NONKEY
2481  : AVDISCARD_ALL;
2482 
2483  if (avctx->skip_frame >= skip_thresh) {
2484  s->invisible = 1;
2485  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2486  goto skip_decode;
2487  }
2488  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2489 
2490  // release no longer referenced frames
2491  for (i = 0; i < 5; i++)
2492  if (s->frames[i].tf.f->data[0] &&
2493  &s->frames[i] != prev_frame &&
2494  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2495  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2496  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2497  vp8_release_frame(s, &s->frames[i]);
2498 
2499  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2500 
2501  /* Given that arithmetic probabilities are updated every frame, it's quite
2502  * likely that the values we have on a random interframe are complete
2503  * junk if we didn't start decode on a keyframe. So just don't display
2504  * anything rather than junk. */
2505  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2506  !s->framep[VP56_FRAME_GOLDEN] ||
2507  !s->framep[VP56_FRAME_GOLDEN2])) {
2508  av_log(avctx, AV_LOG_WARNING,
2509  "Discarding interframe without a prior keyframe!\n");
2510  ret = AVERROR_INVALIDDATA;
2511  goto err;
2512  }
2513 
2514  curframe->tf.f->key_frame = s->keyframe;
2515  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2516  : AV_PICTURE_TYPE_P;
2517  if ((ret = vp8_alloc_frame(s, curframe, referenced))) {
2518  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
2519  goto err;
2520  }
2521 
2522  // check if golden and altref are swapped
2523  if (s->update_altref != VP56_FRAME_NONE)
2524  s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
2525  else
2526  s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
2527 
2528  if (s->update_golden != VP56_FRAME_NONE)
2529  s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
2530  else
2531  s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
2532 
2533  if (s->update_last)
2534  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2535  else
2536  s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
2537 
2538  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2539 
2540  ff_thread_finish_setup(avctx);
2541 
2542  s->linesize = curframe->tf.f->linesize[0];
2543  s->uvlinesize = curframe->tf.f->linesize[1];
2544 
2545  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2546  /* Zero macroblock structures for top/top-left prediction
2547  * from outside the frame. */
2548  if (!s->mb_layout)
2549  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2550  (s->mb_width + 1) * sizeof(*s->macroblocks));
2551  if (!s->mb_layout && s->keyframe)
2552  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2553 
2554  memset(s->ref_count, 0, sizeof(s->ref_count));
2555 
2556  if (s->mb_layout == 1) {
2557  // Make sure the previous frame has read its segmentation map,
2558  // if we re-use the same map.
2559  if (prev_frame && s->segmentation.enabled &&
2560  !s->segmentation.update_map)
2561  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2562  if (is_vp7)
2563  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2564  else
2565  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2566  }
2567 
2568  if (avctx->active_thread_type == FF_THREAD_FRAME)
2569  num_jobs = 1;
2570  else
2571  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2572  s->num_jobs = num_jobs;
2573  s->curframe = curframe;
2574  s->prev_frame = prev_frame;
2575  s->mv_min.y = -MARGIN;
2576  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2577  for (i = 0; i < MAX_THREADS; i++) {
2578  s->thread_data[i].thread_mb_pos = 0;
2579  s->thread_data[i].wait_mb_pos = INT_MAX;
2580  }
2581  if (is_vp7)
2582  avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL,
2583  num_jobs);
2584  else
2585  avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL,
2586  num_jobs);
2587 
2588  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2589  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2590 
2591 skip_decode:
2592  // if future frames don't use the updated probabilities,
2593  // reset them to the values we saved
2594  if (!s->update_probabilities)
2595  s->prob[0] = s->prob[1];
2596 
2597  if (!s->invisible) {
2598  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2599  return ret;
2600  *got_frame = 1;
2601  }
2602 
2603  return avpkt->size;
2604 err:
2605  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2606  return ret;
2607 }
2608 
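/* Note (not part of the original source): in vp78_decode_frame() above,
 * skip_thresh maps a frame's importance to the lowest AVDiscard setting that
 * drops it: unreferenced frames are skipped from AVDISCARD_NONREF upwards,
 * referenced inter frames from AVDISCARD_NONKEY upwards, and keyframes only
 * at AVDISCARD_ALL.  The same threshold gates deblocking: the loop filter is
 * run only while avctx->skip_loop_filter stays below it. */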
2609 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2610  AVPacket *avpkt)
2611 {
2612  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2613 }
2614 
2615 #if CONFIG_VP7_DECODER
2616 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2617  AVPacket *avpkt)
2618 {
2619  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2620 }
2621 #endif /* CONFIG_VP7_DECODER */
2622 
2623 av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
2624 {
2625  VP8Context *s = avctx->priv_data;
2626  int i;
2627 
2628  vp8_decode_flush_impl(avctx, 1);
2629  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2630  av_frame_free(&s->frames[i].tf.f);
2631 
2632  return 0;
2633 }
2634 
2635 static av_cold int vp8_init_frames(VP8Context *s)
2636 {
2637  int i;
2638  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2639  s->frames[i].tf.f = av_frame_alloc();
2640  if (!s->frames[i].tf.f)
2641  return AVERROR(ENOMEM);
2642  }
2643  return 0;
2644 }
2645 
2646 static av_always_inline
2647 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2648 {
2649  VP8Context *s = avctx->priv_data;
2650  int ret;
2651 
2652  s->avctx = avctx;
2653  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2654  avctx->internal->allocate_progress = 1;
2655 
2656  ff_videodsp_init(&s->vdsp, 8);
2657 
2658  ff_vp78dsp_init(&s->vp8dsp);
2659  if (CONFIG_VP7_DECODER && is_vp7) {
2660  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP7, 8, 1);
2661  ff_vp7dsp_init(&s->vp8dsp);
2662  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2663  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP8, 8, 1);
2664  ff_vp8dsp_init(&s->vp8dsp);
2665  }
2666 
2667  /* does not change for VP8 */
2668  memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
2669 
2670  if ((ret = vp8_init_frames(s)) < 0) {
2671  ff_vp8_decode_free(avctx);
2672  return ret;
2673  }
2674 
2675  return 0;
2676 }
2677 
2678 #if CONFIG_VP7_DECODER
2679 static int vp7_decode_init(AVCodecContext *avctx)
2680 {
2681  return vp78_decode_init(avctx, IS_VP7);
2682 }
2683 #endif /* CONFIG_VP7_DECODER */
2684 
2685 av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
2686 {
2687  return vp78_decode_init(avctx, IS_VP8);
2688 }
2689 
2690 #if CONFIG_VP8_DECODER
2691 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2692 {
2693  VP8Context *s = avctx->priv_data;
2694  int ret;
2695 
2696  s->avctx = avctx;
2697 
2698  if ((ret = vp8_init_frames(s)) < 0) {
2699  ff_vp8_decode_free(avctx);
2700  return ret;
2701  }
2702 
2703  return 0;
2704 }
2705 
2706 #define REBASE(pic) pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
2707 
2708 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2709  const AVCodecContext *src)
2710 {
2711  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2712  int i;
2713 
2714  if (s->macroblocks_base &&
2715  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2716  free_buffers(s);
2717  s->mb_width = s_src->mb_width;
2718  s->mb_height = s_src->mb_height;
2719  }
2720 
2721  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2722  s->segmentation = s_src->segmentation;
2723  s->lf_delta = s_src->lf_delta;
2724  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2725 
2726  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2727  if (s_src->frames[i].tf.f->data[0]) {
2728  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2729  if (ret < 0)
2730  return ret;
2731  }
2732  }
2733 
2734  s->framep[0] = REBASE(s_src->next_framep[0]);
2735  s->framep[1] = REBASE(s_src->next_framep[1]);
2736  s->framep[2] = REBASE(s_src->next_framep[2]);
2737  s->framep[3] = REBASE(s_src->next_framep[3]);
2738 
2739  return 0;
2740 }
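/* A minimal sketch (not part of the original source) of what REBASE() does
 * above: a frame pointer belonging to the source thread's frames[] array is
 * translated to the same slot of the destination thread's array by reapplying
 * the element offset, so &s_src->frames[2] becomes &s->frames[2]. */
static VP8Frame *rebase_sketch(VP8Frame *pic, VP8Frame *src_base,
                               VP8Frame *dst_base)
{
    return pic ? dst_base + (pic - src_base) : NULL;
}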
2741 #endif /* CONFIG_VP8_DECODER */
2742 
2743 #if CONFIG_VP7_DECODER
2744 AVCodec ff_vp7_decoder = {
2745  .name = "vp7",
2746  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2747  .type = AVMEDIA_TYPE_VIDEO,
2748  .id = AV_CODEC_ID_VP7,
2749  .priv_data_size = sizeof(VP8Context),
2750  .init = vp7_decode_init,
2751  .close = ff_vp8_decode_free,
2752  .decode = vp7_decode_frame,
2753  .capabilities = CODEC_CAP_DR1,
2754  .flush = vp8_decode_flush,
2755 };
2756 #endif /* CONFIG_VP7_DECODER */
2757 
2758 #if CONFIG_VP8_DECODER
2759 AVCodec ff_vp8_decoder = {
2760  .name = "vp8",
2761  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2762  .type = AVMEDIA_TYPE_VIDEO,
2763  .id = AV_CODEC_ID_VP8,
2764  .priv_data_size = sizeof(VP8Context),
2765  .init = ff_vp8_decode_init,
2766  .close = ff_vp8_decode_free,
2767  .decode = ff_vp8_decode_frame,
2768  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS,
2769  .flush = vp8_decode_flush,
2770  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2771  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2772 };
2773 #endif /* CONFIG_VP8_DECODER */