vp3.c
1 /*
2  * Copyright (C) 2003-2004 the ffmpeg project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 
36 #include "libavutil/imgutils.h"
37 #include "avcodec.h"
38 #include "internal.h"
39 #include "dsputil.h"
40 #include "get_bits.h"
41 #include "hpeldsp.h"
42 #include "videodsp.h"
43 #include "vp3data.h"
44 #include "vp3dsp.h"
45 #include "xiph.h"
46 #include "thread.h"
47 
48 #define FRAGMENT_PIXELS 8
49 
50 //FIXME split things out into their own arrays
51 typedef struct Vp3Fragment {
52  int16_t dc;
53  uint8_t coding_method;
54  uint8_t qpi;
55 } Vp3Fragment;
56 
57 #define SB_NOT_CODED 0
58 #define SB_PARTIALLY_CODED 1
59 #define SB_FULLY_CODED 2
60 
61 // This is the maximum length of a single long bit run that can be encoded
62 // for superblock coding or block qps. Theora special-cases this to read a
63 // bit instead of flipping the current bit to allow for runs longer than 4129.
64 #define MAXIMUM_LONG_BIT_RUN 4129
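/* A minimal sketch (not part of the original file) of how one such bit run is
 * read; read_bit_run() is a hypothetical helper mirroring the logic used in
 * unpack_superblocks() and unpack_block_qpis() below. The longest run is 34
 * from the VLC plus a 12-bit extension, i.e. 34 + 4095 = 4129. */
static int read_bit_run(GetBitContext *gb, VLC *run_vlc, int *bit,
                        int prev_run, int is_theora)
{
    int run;

    /* after a maximum-length run, Theora reads the bit explicitly instead of
     * toggling it, which is what allows runs longer than 4129 overall */
    if (is_theora && prev_run == MAXIMUM_LONG_BIT_RUN)
        *bit = get_bits1(gb);
    else
        *bit ^= 1;

    run = get_vlc2(gb, run_vlc->table, 6, 2) + 1;   /* run lengths 1..34 */
    if (run == 34)
        run += get_bits(gb, 12);                    /* extend up to 4129 */
    return run;
}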
65 
66 #define MODE_INTER_NO_MV 0
67 #define MODE_INTRA 1
68 #define MODE_INTER_PLUS_MV 2
69 #define MODE_INTER_LAST_MV 3
70 #define MODE_INTER_PRIOR_LAST 4
71 #define MODE_USING_GOLDEN 5
72 #define MODE_GOLDEN_MV 6
73 #define MODE_INTER_FOURMV 7
74 #define CODING_MODE_COUNT 8
75 
76 /* special internal mode */
77 #define MODE_COPY 8
78 
79 /* There are 6 preset schemes, plus a free-form scheme */
80 static const int ModeAlphabet[6][CODING_MODE_COUNT] =
81 {
82  /* scheme 1: Last motion vector dominates */
87 
88  /* scheme 2 */
93 
94  /* scheme 3 */
99 
100  /* scheme 4 */
105 
106  /* scheme 5: No motion vector dominates */
111 
112  /* scheme 6 */
117 
118 };
119 
120 static const uint8_t hilbert_offset[16][2] = {
121  {0,0}, {1,0}, {1,1}, {0,1},
122  {0,2}, {0,3}, {1,3}, {1,2},
123  {2,2}, {2,3}, {3,3}, {3,2},
124  {3,1}, {2,1}, {2,0}, {3,0}
125 };
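/* For reference, the table above visits the 4x4 fragment grid of a superblock
 * in the following order (hilbert_offset[] index shown at each (x, y)
 * position):
 *
 *        x=0  x=1  x=2  x=3
 *   y=0    0    1   14   15
 *   y=1    3    2   13   12
 *   y=2    4    7    8   11
 *   y=3    5    6    9   10
 */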
126 
127 #define MIN_DEQUANT_VAL 2
128 
129 typedef struct Vp3DecodeContext {
132  int version;
133  int width, height;
138  int keyframe;
144  DECLARE_ALIGNED(16, int16_t, block)[64];
148 
149  int qps[3];
150  int nqps;
151  int last_qps[3];
152 
162  unsigned char *superblock_coding;
163 
167 
171 
174  int data_offset[3];
175 
176  int8_t (*motion_val[2])[2];
177 
178  /* tables */
179  uint16_t coded_dc_scale_factor[64];
180  uint32_t coded_ac_scale_factor[64];
183  uint8_t qr_size [2][3][64];
184  uint16_t qr_base[2][3][64];
185 
203  int16_t *dct_tokens[3][64];
204  int16_t *dct_tokens_base;
205 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
206 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
207 #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2)
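/* Worked example of the packing above (illustration only): the low 2 bits
 * select the token type. TOKEN_EOB(7) = 28 (type 0, EOB run 7);
 * TOKEN_ZERO_RUN(-3, 5) = (-3 << 9) + (5 << 2) + 1 (type 1, run length 5 in
 * bits 8:2, signed coefficient -3 in the top bits); TOKEN_COEFF(12) = 50
 * (type 2, coefficient 12). vp3_dequant() recovers the type with (token & 3)
 * and the fields with the matching shifts. */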
208 
212  int num_coded_frags[3][64];
214 
215  /* this is a list of indexes into the all_fragments array indicating
216  * which of the fragments are coded */
218 
219  VLC dc_vlc[16];
224 
229 
230  /* these arrays need to be on 16-byte boundaries since SSE2 operations
231  * index into them */
232  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];
233 
234  /* This table contains superblock_count * 16 entries. Each set of 16
235  * numbers corresponds to the fragment indexes 0..15 of the superblock.
236  * An entry of -1 indicates that no fragment corresponds to that
237  * index. */
239 
240  /* This is an array that indicates how a particular macroblock
241  * is coded. */
242  unsigned char *macroblock_coding;
243 
245 
246  /* Huffman decode */
247  int hti;
248  unsigned int hbits;
249  int entries;
251  uint32_t huffman_table[80][32][2];
252 
256 
257 /************************************************************************
258  * VP3 specific functions
259  ************************************************************************/
260 
261 static void vp3_decode_flush(AVCodecContext *avctx)
262 {
263  Vp3DecodeContext *s = avctx->priv_data;
264 
265  if (s->golden_frame.f)
266  ff_thread_release_buffer(avctx, &s->golden_frame);
267  if (s->last_frame.f)
268  ff_thread_release_buffer(avctx, &s->last_frame);
269  if (s->current_frame.f)
270  ff_thread_release_buffer(avctx, &s->current_frame);
271 }
272 
273 static av_cold int vp3_decode_end(AVCodecContext *avctx)
274 {
275  Vp3DecodeContext *s = avctx->priv_data;
276  int i;
277 
279  av_freep(&s->all_fragments);
284  av_freep(&s->motion_val[0]);
285  av_freep(&s->motion_val[1]);
287 
288  /* release all frames */
289  vp3_decode_flush(avctx);
293 
294  if (avctx->internal->is_copy)
295  return 0;
296 
297  for (i = 0; i < 16; i++) {
298  ff_free_vlc(&s->dc_vlc[i]);
299  ff_free_vlc(&s->ac_vlc_1[i]);
300  ff_free_vlc(&s->ac_vlc_2[i]);
301  ff_free_vlc(&s->ac_vlc_3[i]);
302  ff_free_vlc(&s->ac_vlc_4[i]);
303  }
304 
309 
310 
311  return 0;
312 }
313 
314 /*
315  * This function sets up all of the various block mappings:
316  * superblocks <-> fragments, macroblocks <-> fragments,
317  * superblocks <-> macroblocks
318  *
319  * @return 0 if successful; returns 1 if *anything* went wrong.
320  */
321 static int init_block_mapping(Vp3DecodeContext *s)
322 {
323  int sb_x, sb_y, plane;
324  int x, y, i, j = 0;
325 
326  for (plane = 0; plane < 3; plane++) {
327  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
328  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
329  int frag_width = s->fragment_width[!!plane];
330  int frag_height = s->fragment_height[!!plane];
331 
332  for (sb_y = 0; sb_y < sb_height; sb_y++)
333  for (sb_x = 0; sb_x < sb_width; sb_x++)
334  for (i = 0; i < 16; i++) {
335  x = 4*sb_x + hilbert_offset[i][0];
336  y = 4*sb_y + hilbert_offset[i][1];
337 
338  if (x < frag_width && y < frag_height)
339  s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
340  else
341  s->superblock_fragments[j++] = -1;
342  }
343  }
344 
345  return 0; /* successful path out */
346 }
347 
348 /*
349  * This function sets up the dequantization tables used for a particular
350  * frame.
351  */
352 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
353 {
354  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
355  int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
356  int i, plane, inter, qri, bmi, bmj, qistart;
357 
358  for(inter=0; inter<2; inter++){
359  for(plane=0; plane<3; plane++){
360  int sum=0;
361  for(qri=0; qri<s->qr_count[inter][plane]; qri++){
362  sum+= s->qr_size[inter][plane][qri];
363  if(s->qps[qpi] <= sum)
364  break;
365  }
366  qistart= sum - s->qr_size[inter][plane][qri];
367  bmi= s->qr_base[inter][plane][qri ];
368  bmj= s->qr_base[inter][plane][qri+1];
369  for(i=0; i<64; i++){
370  int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i]
371  - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
372  + s->qr_size[inter][plane][qri])
373  / (2*s->qr_size[inter][plane][qri]);
374 
375  int qmin= 8<<(inter + !i);
376  int qscale= i ? ac_scale_factor : dc_scale_factor;
377 
378  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
379  av_clip((qscale * coeff) / 100 * 4, qmin, 4096);
380  }
381  // all DC coefficients use the same quant so as not to interfere with DC prediction
382  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
383  }
384  }
385 }
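/* Worked example of the interpolation above (illustration only), assuming a
 * single qi range of size 63 as set up for VP3.1 in vp3_decode_init(): with
 * qr_size = 63, qistart = 0, sum = 63 and s->qps[qpi] = 16, the expression
 * reduces to a rounded weighted average of the two base matrices A (qi = 0)
 * and B (qi = 63):
 *
 *   coeff = (2*(63-16)*A[i] - 2*(0-16)*B[i] + 63) / (2*63)
 *         = (94*A[i] + 32*B[i] + 63) / 126
 *
 * so the weights 94/126 and 32/126 sum to 1 and shift toward B as the
 * quality index rises. */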
386 
387 /*
388  * This function initializes the loop filter boundary limits if the frame's
389  * quality index is different from the previous frame's.
390  *
391  * The filter_limit_values may not be larger than 127.
392  */
393 static void init_loop_filter(Vp3DecodeContext *s)
394 {
395  int *bounding_values= s->bounding_values_array+127;
396  int filter_limit;
397  int x;
398  int value;
399 
400  filter_limit = s->filter_limit_values[s->qps[0]];
401  assert(filter_limit < 128);
402 
403  /* set up the bounding values */
404  memset(s->bounding_values_array, 0, 256 * sizeof(int));
405  for (x = 0; x < filter_limit; x++) {
406  bounding_values[-x] = -x;
407  bounding_values[x] = x;
408  }
409  for (x = value = filter_limit; x < 128 && value; x++, value--) {
410  bounding_values[ x] = value;
411  bounding_values[-x] = -value;
412  }
413  if (value)
414  bounding_values[128] = value;
415  bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
416 }
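/* Shape of the resulting table (illustration, filter_limit = 2):
 *
 *   pixel delta:      -4  -3  -2  -1   0   1   2   3   4
 *   bounding value:    0  -1  -2  -1   0   1   2   1   0
 *
 * Small differences pass through unchanged, differences around the limit are
 * damped, and large differences get a bounding value of 0, i.e. the loop
 * filter leaves strong edges alone. */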
417 
418 /*
419  * This function unpacks all of the superblock/macroblock/fragment coding
420  * information from the bitstream.
421  */
422 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
423 {
424  int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
425  int bit = 0;
426  int current_superblock = 0;
427  int current_run = 0;
428  int num_partial_superblocks = 0;
429 
430  int i, j;
431  int current_fragment;
432  int plane;
433 
434  if (s->keyframe) {
435  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
436 
437  } else {
438 
439  /* unpack the list of partially-coded superblocks */
440  bit = get_bits1(gb) ^ 1;
441  current_run = 0;
442 
443  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
444  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
445  bit = get_bits1(gb);
446  else
447  bit ^= 1;
448 
449  current_run = get_vlc2(gb,
450  s->superblock_run_length_vlc.table, 6, 2) + 1;
451  if (current_run == 34)
452  current_run += get_bits(gb, 12);
453 
454  if (current_superblock + current_run > s->superblock_count) {
455  av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
456  return -1;
457  }
458 
459  memset(s->superblock_coding + current_superblock, bit, current_run);
460 
461  current_superblock += current_run;
462  if (bit)
463  num_partial_superblocks += current_run;
464  }
465 
466  /* unpack the list of fully coded superblocks if any of the blocks were
467  * not marked as partially coded in the previous step */
468  if (num_partial_superblocks < s->superblock_count) {
469  int superblocks_decoded = 0;
470 
471  current_superblock = 0;
472  bit = get_bits1(gb) ^ 1;
473  current_run = 0;
474 
475  while (superblocks_decoded < s->superblock_count - num_partial_superblocks
476  && get_bits_left(gb) > 0) {
477 
478  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
479  bit = get_bits1(gb);
480  else
481  bit ^= 1;
482 
483  current_run = get_vlc2(gb,
484  s->superblock_run_length_vlc.table, 6, 2) + 1;
485  if (current_run == 34)
486  current_run += get_bits(gb, 12);
487 
488  for (j = 0; j < current_run; current_superblock++) {
489  if (current_superblock >= s->superblock_count) {
490  av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
491  return -1;
492  }
493 
494  /* skip any superblocks already marked as partially coded */
495  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
496  s->superblock_coding[current_superblock] = 2*bit;
497  j++;
498  }
499  }
500  superblocks_decoded += current_run;
501  }
502  }
503 
504  /* if there were partial blocks, initialize bitstream for
505  * unpacking fragment codings */
506  if (num_partial_superblocks) {
507 
508  current_run = 0;
509  bit = get_bits1(gb);
510  /* toggle the bit because as soon as the first run length is
511  * fetched the bit will be toggled again */
512  bit ^= 1;
513  }
514  }
515 
516  /* figure out which fragments are coded; iterate through each
517  * superblock (all planes) */
518  s->total_num_coded_frags = 0;
520 
521  for (plane = 0; plane < 3; plane++) {
522  int sb_start = superblock_starts[plane];
523  int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
524  int num_coded_frags = 0;
525 
526  for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
527 
528  /* iterate through all 16 fragments in a superblock */
529  for (j = 0; j < 16; j++) {
530 
531  /* if the fragment is in bounds, check its coding status */
532  current_fragment = s->superblock_fragments[i * 16 + j];
533  if (current_fragment != -1) {
534  int coded = s->superblock_coding[i];
535 
536  if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
537 
538  /* fragment may or may not be coded; this is the case
539  * that cares about the fragment coding runs */
540  if (current_run-- == 0) {
541  bit ^= 1;
542  current_run = get_vlc2(gb,
543  s->fragment_run_length_vlc.table, 5, 2);
544  }
545  coded = bit;
546  }
547 
548  if (coded) {
549  /* default mode; actual mode will be decoded in
550  * the next phase */
551  s->all_fragments[current_fragment].coding_method =
552  MODE_INTER_NO_MV;
553  s->coded_fragment_list[plane][num_coded_frags++] =
554  current_fragment;
555  } else {
556  /* not coded; copy this fragment from the prior frame */
557  s->all_fragments[current_fragment].coding_method =
558  MODE_COPY;
559  }
560  }
561  }
562  }
563  s->total_num_coded_frags += num_coded_frags;
564  for (i = 0; i < 64; i++)
565  s->num_coded_frags[plane][i] = num_coded_frags;
566  if (plane < 2)
567  s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
568  }
569  return 0;
570 }
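/* Summary of the three passes above (illustration only):
 *   1. run-length coded bits over every superblock mark the partially coded
 *      ones (SB_PARTIALLY_CODED);
 *   2. run-length coded bits over the remaining superblocks mark the fully
 *      coded ones (2*bit, i.e. SB_FULLY_CODED or SB_NOT_CODED);
 *   3. per-fragment run bits are consumed only inside partially coded
 *      superblocks; fragments of fully coded superblocks are always coded,
 *      and fragments of uncoded superblocks fall back to MODE_COPY. */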
571 
572 /*
573  * This function unpacks all the coding mode data for individual macroblocks
574  * from the bitstream.
575  */
576 static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
577 {
578  int i, j, k, sb_x, sb_y;
579  int scheme;
580  int current_macroblock;
581  int current_fragment;
582  int coding_mode;
583  int custom_mode_alphabet[CODING_MODE_COUNT];
584  const int *alphabet;
585  Vp3Fragment *frag;
586 
587  if (s->keyframe) {
588  for (i = 0; i < s->fragment_count; i++)
589  s->all_fragments[i].coding_method = MODE_INTRA;
590 
591  } else {
592 
593  /* fetch the mode coding scheme for this frame */
594  scheme = get_bits(gb, 3);
595 
596  /* is it a custom coding scheme? */
597  if (scheme == 0) {
598  for (i = 0; i < 8; i++)
599  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
600  for (i = 0; i < 8; i++)
601  custom_mode_alphabet[get_bits(gb, 3)] = i;
602  alphabet = custom_mode_alphabet;
603  } else
604  alphabet = ModeAlphabet[scheme-1];
605 
606  /* iterate through all of the macroblocks that contain 1 or more
607  * coded fragments */
608  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
609  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
610  if (get_bits_left(gb) <= 0)
611  return -1;
612 
613  for (j = 0; j < 4; j++) {
614  int mb_x = 2*sb_x + (j>>1);
615  int mb_y = 2*sb_y + (((j>>1)+j)&1);
616  current_macroblock = mb_y * s->macroblock_width + mb_x;
617 
618  if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
619  continue;
620 
621 #define BLOCK_X (2*mb_x + (k&1))
622 #define BLOCK_Y (2*mb_y + (k>>1))
623  /* coding modes are only stored if the macroblock has at least one
624  * luma block coded, otherwise it must be INTER_NO_MV */
625  for (k = 0; k < 4; k++) {
626  current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
627  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
628  break;
629  }
630  if (k == 4) {
631  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
632  continue;
633  }
634 
635  /* mode 7 means get 3 bits for each coding mode */
636  if (scheme == 7)
637  coding_mode = get_bits(gb, 3);
638  else
639  coding_mode = alphabet
640  [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
641 
642  s->macroblock_coding[current_macroblock] = coding_mode;
643  for (k = 0; k < 4; k++) {
644  frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
645  if (frag->coding_method != MODE_COPY)
646  frag->coding_method = coding_mode;
647  }
648 
649 #define SET_CHROMA_MODES \
650  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
651  frag[s->fragment_start[1]].coding_method = coding_mode;\
652  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
653  frag[s->fragment_start[2]].coding_method = coding_mode;
654 
655  if (s->chroma_y_shift) {
656  frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
657  SET_CHROMA_MODES
658  } else if (s->chroma_x_shift) {
659  frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
660  for (k = 0; k < 2; k++) {
661  SET_CHROMA_MODES
662  frag += s->fragment_width[1];
663  }
664  } else {
665  for (k = 0; k < 4; k++) {
666  frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
667  SET_CHROMA_MODES
668  }
669  }
670  }
671  }
672  }
673  }
674 
675  return 0;
676 }
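/* Example of the custom (scheme 0) alphabet above (illustration only): for
 * each coding mode i = 0..7 a 3-bit rank is read and the mode is stored at
 * that rank, so if the first value read is 5 then MODE_INTER_NO_MV (mode 0)
 * is decoded whenever the mode VLC yields rank 5. Any rank never written
 * keeps the MODE_INTER_NO_MV default set by the first loop. */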
677 
678 /*
679  * This function unpacks all the motion vectors for the individual
680  * macroblocks from the bitstream.
681  */
682 static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
683 {
684  int j, k, sb_x, sb_y;
685  int coding_mode;
686  int motion_x[4];
687  int motion_y[4];
688  int last_motion_x = 0;
689  int last_motion_y = 0;
690  int prior_last_motion_x = 0;
691  int prior_last_motion_y = 0;
692  int current_macroblock;
693  int current_fragment;
694  int frag;
695 
696  if (s->keyframe)
697  return 0;
698 
699  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
700  coding_mode = get_bits1(gb);
701 
702  /* iterate through all of the macroblocks that contain 1 or more
703  * coded fragments */
704  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
705  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
706  if (get_bits_left(gb) <= 0)
707  return -1;
708 
709  for (j = 0; j < 4; j++) {
710  int mb_x = 2*sb_x + (j>>1);
711  int mb_y = 2*sb_y + (((j>>1)+j)&1);
712  current_macroblock = mb_y * s->macroblock_width + mb_x;
713 
714  if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
715  (s->macroblock_coding[current_macroblock] == MODE_COPY))
716  continue;
717 
718  switch (s->macroblock_coding[current_macroblock]) {
719 
720  case MODE_INTER_PLUS_MV:
721  case MODE_GOLDEN_MV:
722  /* all 6 fragments use the same motion vector */
723  if (coding_mode == 0) {
724  motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
725  motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
726  } else {
727  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
728  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
729  }
730 
731  /* vector maintenance, only on MODE_INTER_PLUS_MV */
732  if (s->macroblock_coding[current_macroblock] ==
733  MODE_INTER_PLUS_MV) {
734  prior_last_motion_x = last_motion_x;
735  prior_last_motion_y = last_motion_y;
736  last_motion_x = motion_x[0];
737  last_motion_y = motion_y[0];
738  }
739  break;
740 
741  case MODE_INTER_FOURMV:
742  /* vector maintenance */
743  prior_last_motion_x = last_motion_x;
744  prior_last_motion_y = last_motion_y;
745 
746  /* fetch 4 vectors from the bitstream, one for each
747  * Y fragment, then average for the C fragment vectors */
748  for (k = 0; k < 4; k++) {
749  current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
750  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
751  if (coding_mode == 0) {
752  motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
753  motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
754  } else {
755  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
756  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
757  }
758  last_motion_x = motion_x[k];
759  last_motion_y = motion_y[k];
760  } else {
761  motion_x[k] = 0;
762  motion_y[k] = 0;
763  }
764  }
765  break;
766 
767  case MODE_INTER_LAST_MV:
768  /* all 6 fragments use the last motion vector */
769  motion_x[0] = last_motion_x;
770  motion_y[0] = last_motion_y;
771 
772  /* no vector maintenance (last vector remains the
773  * last vector) */
774  break;
775 
776  case MODE_INTER_PRIOR_LAST:
777  /* all 6 fragments use the motion vector prior to the
778  * last motion vector */
779  motion_x[0] = prior_last_motion_x;
780  motion_y[0] = prior_last_motion_y;
781 
782  /* vector maintenance */
783  prior_last_motion_x = last_motion_x;
784  prior_last_motion_y = last_motion_y;
785  last_motion_x = motion_x[0];
786  last_motion_y = motion_y[0];
787  break;
788 
789  default:
790  /* covers intra, inter without MV, golden without MV */
791  motion_x[0] = 0;
792  motion_y[0] = 0;
793 
794  /* no vector maintenance */
795  break;
796  }
797 
798  /* assign the motion vectors to the correct fragments */
799  for (k = 0; k < 4; k++) {
800  current_fragment =
801  BLOCK_Y*s->fragment_width[0] + BLOCK_X;
802  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
803  s->motion_val[0][current_fragment][0] = motion_x[k];
804  s->motion_val[0][current_fragment][1] = motion_y[k];
805  } else {
806  s->motion_val[0][current_fragment][0] = motion_x[0];
807  s->motion_val[0][current_fragment][1] = motion_y[0];
808  }
809  }
810 
811  if (s->chroma_y_shift) {
812  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
813  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
814  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
815  }
816  motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
817  motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
818  frag = mb_y*s->fragment_width[1] + mb_x;
819  s->motion_val[1][frag][0] = motion_x[0];
820  s->motion_val[1][frag][1] = motion_y[0];
821  } else if (s->chroma_x_shift) {
822  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
823  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
824  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
825  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
826  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
827  } else {
828  motion_x[1] = motion_x[0];
829  motion_y[1] = motion_y[0];
830  }
831  motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
832  motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);
833 
834  frag = 2*mb_y*s->fragment_width[1] + mb_x;
835  for (k = 0; k < 2; k++) {
836  s->motion_val[1][frag][0] = motion_x[k];
837  s->motion_val[1][frag][1] = motion_y[k];
838  frag += s->fragment_width[1];
839  }
840  } else {
841  for (k = 0; k < 4; k++) {
842  frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
843  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
844  s->motion_val[1][frag][0] = motion_x[k];
845  s->motion_val[1][frag][1] = motion_y[k];
846  } else {
847  s->motion_val[1][frag][0] = motion_x[0];
848  s->motion_val[1][frag][1] = motion_y[0];
849  }
850  }
851  }
852  }
853  }
854  }
855 
856  return 0;
857 }
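/* Chroma vector derivation used above (illustration, the 4:2:0 case): luma
 * vectors are stored in half-pel units, and (v >> 1) | (v & 1) halves the
 * vector while keeping the result odd whenever the luma vector was odd, so a
 * fractional luma position always maps to a half-pel (interpolated) chroma
 * fetch:
 *
 *   luma v (half-pel):   -5  -4  -3   4   5   6
 *   chroma result:       -3  -2  -1   2   3   3
 *
 * For MODE_INTER_FOURMV the four luma vectors are first averaged with
 * RSHIFT() before being halved. */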
858 
859 static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
860 {
861  int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
862  int num_blocks = s->total_num_coded_frags;
863 
864  for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
865  i = blocks_decoded = num_blocks_at_qpi = 0;
866 
867  bit = get_bits1(gb) ^ 1;
868  run_length = 0;
869 
870  do {
871  if (run_length == MAXIMUM_LONG_BIT_RUN)
872  bit = get_bits1(gb);
873  else
874  bit ^= 1;
875 
876  run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
877  if (run_length == 34)
878  run_length += get_bits(gb, 12);
879  blocks_decoded += run_length;
880 
881  if (!bit)
882  num_blocks_at_qpi += run_length;
883 
884  for (j = 0; j < run_length; i++) {
885  if (i >= s->total_num_coded_frags)
886  return -1;
887 
888  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
889  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
890  j++;
891  }
892  }
893  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
894 
895  num_blocks -= num_blocks_at_qpi;
896  }
897 
898  return 0;
899 }
900 
901 /*
902  * This function is called by unpack_dct_coeffs() to extract the VLCs from
903  * the bitstream. The VLCs encode tokens which are used to unpack DCT
904  * data. This function unpacks all the VLCs for either the Y plane or both
905  * C planes, and is called for DC coefficients or different AC coefficient
906  * levels (since different coefficient types require different VLC tables).
907  *
908  * This function returns a residual EOB run. E.g., if a particular token gave
909  * instructions to EOB the next 5 fragments and there were only 2 fragments
910  * left in the current fragment range, 3 would be returned so that it could
911  * be passed into the next call to this same function.
912  */
913 static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
914  VLC *table, int coeff_index,
915  int plane,
916  int eob_run)
917 {
918  int i, j = 0;
919  int token;
920  int zero_run = 0;
921  int16_t coeff = 0;
922  int bits_to_get;
923  int blocks_ended;
924  int coeff_i = 0;
925  int num_coeffs = s->num_coded_frags[plane][coeff_index];
926  int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
927 
928  /* local references to structure members to avoid repeated dereferences */
929  int *coded_fragment_list = s->coded_fragment_list[plane];
930  Vp3Fragment *all_fragments = s->all_fragments;
931  VLC_TYPE (*vlc_table)[2] = table->table;
932 
933  if (num_coeffs < 0)
934  av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficients at level %d\n", coeff_index);
935 
936  if (eob_run > num_coeffs) {
937  coeff_i = blocks_ended = num_coeffs;
938  eob_run -= num_coeffs;
939  } else {
940  coeff_i = blocks_ended = eob_run;
941  eob_run = 0;
942  }
943 
944  // insert fake EOB token to cover the split between planes or zzi
945  if (blocks_ended)
946  dct_tokens[j++] = blocks_ended << 2;
947 
948  while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
949  /* decode a VLC into a token */
950  token = get_vlc2(gb, vlc_table, 11, 3);
951  /* use the token to get a zero run, a coefficient, and an eob run */
952  if ((unsigned) token <= 6U) {
953  eob_run = eob_run_base[token];
954  if (eob_run_get_bits[token])
955  eob_run += get_bits(gb, eob_run_get_bits[token]);
956 
957  // record only the number of blocks ended in this plane,
958  // any spill will be recorded in the next plane.
959  if (eob_run > num_coeffs - coeff_i) {
960  dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
961  blocks_ended += num_coeffs - coeff_i;
962  eob_run -= num_coeffs - coeff_i;
963  coeff_i = num_coeffs;
964  } else {
965  dct_tokens[j++] = TOKEN_EOB(eob_run);
966  blocks_ended += eob_run;
967  coeff_i += eob_run;
968  eob_run = 0;
969  }
970  } else if (token >= 0) {
971  bits_to_get = coeff_get_bits[token];
972  if (bits_to_get)
973  bits_to_get = get_bits(gb, bits_to_get);
974  coeff = coeff_tables[token][bits_to_get];
975 
976  zero_run = zero_run_base[token];
977  if (zero_run_get_bits[token])
978  zero_run += get_bits(gb, zero_run_get_bits[token]);
979 
980  if (zero_run) {
981  dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
982  } else {
983  // Save DC into the fragment structure. DC prediction is
984  // done in raster order, so the actual DC can't be in with
985  // other tokens. We still need the token in dct_tokens[]
986  // however, or else the structure collapses on itself.
987  if (!coeff_index)
988  all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
989 
990  dct_tokens[j++] = TOKEN_COEFF(coeff);
991  }
992 
993  if (coeff_index + zero_run > 64) {
994  av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
995  " %d coeffs left\n", zero_run, 64-coeff_index);
996  zero_run = 64 - coeff_index;
997  }
998 
999  // zero runs code multiple coefficients,
1000  // so don't try to decode coeffs for those higher levels
1001  for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
1002  s->num_coded_frags[plane][i]--;
1003  coeff_i++;
1004  } else {
1005  av_log(s->avctx, AV_LOG_ERROR,
1006  "Invalid token %d\n", token);
1007  return -1;
1008  }
1009  }
1010 
1011  if (blocks_ended > s->num_coded_frags[plane][coeff_index])
1012  av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");
1013 
1014  // decrement the number of blocks that have higher coefficients for each
1015  // EOB run at this level
1016  if (blocks_ended)
1017  for (i = coeff_index+1; i < 64; i++)
1018  s->num_coded_frags[plane][i] -= blocks_ended;
1019 
1020  // setup the next buffer
1021  if (plane < 2)
1022  s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
1023  else if (coeff_index < 63)
1024  s->dct_tokens[0][coeff_index+1] = dct_tokens + j;
1025 
1026  return eob_run;
1027 }
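/* Example of the residual EOB run (illustration only): if a token decoded
 * during the Y pass at coefficient level 3 starts an EOB run of 5 but only 2
 * coded Y fragments remain at that level, 2 blocks are ended here and
 * eob_run = 3 is returned; the following call for the same level on the Cb
 * plane then begins by ending 3 of its fragments (via the fake EOB token
 * inserted before the main loop) before reading any new tokens. */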
1028 
1029 static void reverse_dc_prediction(Vp3DecodeContext *s,
1030  int first_fragment,
1031  int fragment_width,
1032  int fragment_height);
1033 /*
1034  * This function unpacks all of the DCT coefficient data from the
1035  * bitstream.
1036  */
1037 static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1038 {
1039  int i;
1040  int dc_y_table;
1041  int dc_c_table;
1042  int ac_y_table;
1043  int ac_c_table;
1044  int residual_eob_run = 0;
1045  VLC *y_tables[64];
1046  VLC *c_tables[64];
1047 
1048  s->dct_tokens[0][0] = s->dct_tokens_base;
1049 
1050  /* fetch the DC table indexes */
1051  dc_y_table = get_bits(gb, 4);
1052  dc_c_table = get_bits(gb, 4);
1053 
1054  /* unpack the Y plane DC coefficients */
1055  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1056  0, residual_eob_run);
1057  if (residual_eob_run < 0)
1058  return residual_eob_run;
1059 
1060  /* reverse prediction of the Y-plane DC coefficients */
1061  reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);
1062 
1063  /* unpack the C plane DC coefficients */
1064  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1065  1, residual_eob_run);
1066  if (residual_eob_run < 0)
1067  return residual_eob_run;
1068  residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1069  2, residual_eob_run);
1070  if (residual_eob_run < 0)
1071  return residual_eob_run;
1072 
1073  /* reverse prediction of the C-plane DC coefficients */
1074  if (!(s->avctx->flags & CODEC_FLAG_GRAY))
1075  {
1076  reverse_dc_prediction(s, s->fragment_start[1],
1077  s->fragment_width[1], s->fragment_height[1]);
1078  reverse_dc_prediction(s, s->fragment_start[2],
1079  s->fragment_width[1], s->fragment_height[1]);
1080  }
1081 
1082  /* fetch the AC table indexes */
1083  ac_y_table = get_bits(gb, 4);
1084  ac_c_table = get_bits(gb, 4);
1085 
1086  /* build tables of AC VLC tables */
1087  for (i = 1; i <= 5; i++) {
1088  y_tables[i] = &s->ac_vlc_1[ac_y_table];
1089  c_tables[i] = &s->ac_vlc_1[ac_c_table];
1090  }
1091  for (i = 6; i <= 14; i++) {
1092  y_tables[i] = &s->ac_vlc_2[ac_y_table];
1093  c_tables[i] = &s->ac_vlc_2[ac_c_table];
1094  }
1095  for (i = 15; i <= 27; i++) {
1096  y_tables[i] = &s->ac_vlc_3[ac_y_table];
1097  c_tables[i] = &s->ac_vlc_3[ac_c_table];
1098  }
1099  for (i = 28; i <= 63; i++) {
1100  y_tables[i] = &s->ac_vlc_4[ac_y_table];
1101  c_tables[i] = &s->ac_vlc_4[ac_c_table];
1102  }
1103 
1104  /* decode all AC coefficients */
1105  for (i = 1; i <= 63; i++) {
1106  residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1107  0, residual_eob_run);
1108  if (residual_eob_run < 0)
1109  return residual_eob_run;
1110 
1111  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1112  1, residual_eob_run);
1113  if (residual_eob_run < 0)
1114  return residual_eob_run;
1115  residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1116  2, residual_eob_run);
1117  if (residual_eob_run < 0)
1118  return residual_eob_run;
1119  }
1120 
1121  return 0;
1122 }
1123 
1124 /*
1125  * This function reverses the DC prediction for each coded fragment in
1126  * the frame. Much of this function is adapted directly from the original
1127  * VP3 source code.
1128  */
1129 #define COMPATIBLE_FRAME(x) \
1130  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1131 #define DC_COEFF(u) s->all_fragments[u].dc
1132 
1133 static void reverse_dc_prediction(Vp3DecodeContext *s,
1134  int first_fragment,
1135  int fragment_width,
1136  int fragment_height)
1137 {
1138 
1139 #define PUL 8
1140 #define PU 4
1141 #define PUR 2
1142 #define PL 1
1143 
1144  int x, y;
1145  int i = first_fragment;
1146 
1147  int predicted_dc;
1148 
1149  /* DC values for the left, up-left, up, and up-right fragments */
1150  int vl, vul, vu, vur;
1151 
1152  /* indexes for the left, up-left, up, and up-right fragments */
1153  int l, ul, u, ur;
1154 
1155  /*
1156  * The 4 fields mean:
1157  * 0: up-left multiplier
1158  * 1: up multiplier
1159  * 2: up-right multiplier
1160  * 3: left multiplier
1161  */
1162  static const int predictor_transform[16][4] = {
1163  { 0, 0, 0, 0},
1164  { 0, 0, 0,128}, // PL
1165  { 0, 0,128, 0}, // PUR
1166  { 0, 0, 53, 75}, // PUR|PL
1167  { 0,128, 0, 0}, // PU
1168  { 0, 64, 0, 64}, // PU|PL
1169  { 0,128, 0, 0}, // PU|PUR
1170  { 0, 0, 53, 75}, // PU|PUR|PL
1171  {128, 0, 0, 0}, // PUL
1172  { 0, 0, 0,128}, // PUL|PL
1173  { 64, 0, 64, 0}, // PUL|PUR
1174  { 0, 0, 53, 75}, // PUL|PUR|PL
1175  { 0,128, 0, 0}, // PUL|PU
1176  {-104,116, 0,116}, // PUL|PU|PL
1177  { 24, 80, 24, 0}, // PUL|PU|PUR
1178  {-104,116, 0,116} // PUL|PU|PUR|PL
1179  };
1180 
1181  /* This table shows which types of blocks can use other blocks for
1182  * prediction. For example, INTRA is the only mode in this table to
1183  * have a frame number of 0. That means INTRA blocks can only predict
1184  * from other INTRA blocks. There are 2 golden frame coding types;
1185  * blocks encoded in these modes can only predict from other blocks
1186  * that were encoded with one of these 2 modes. */
1187  static const unsigned char compatible_frame[9] = {
1188  1, /* MODE_INTER_NO_MV */
1189  0, /* MODE_INTRA */
1190  1, /* MODE_INTER_PLUS_MV */
1191  1, /* MODE_INTER_LAST_MV */
1192  1, /* MODE_INTER_PRIOR_LAST */
1193  2, /* MODE_USING_GOLDEN */
1194  2, /* MODE_GOLDEN_MV */
1195  1, /* MODE_INTER_FOURMV */
1196  3 /* MODE_COPY */
1197  };
1198  int current_frame_type;
1199 
1200  /* there is a last DC predictor for each of the 3 frame types */
1201  short last_dc[3];
1202 
1203  int transform = 0;
1204 
1205  vul = vu = vur = vl = 0;
1206  last_dc[0] = last_dc[1] = last_dc[2] = 0;
1207 
1208  /* for each fragment row... */
1209  for (y = 0; y < fragment_height; y++) {
1210 
1211  /* for each fragment in a row... */
1212  for (x = 0; x < fragment_width; x++, i++) {
1213 
1214  /* reverse prediction if this block was coded */
1215  if (s->all_fragments[i].coding_method != MODE_COPY) {
1216 
1217  current_frame_type =
1218  compatible_frame[s->all_fragments[i].coding_method];
1219 
1220  transform= 0;
1221  if(x){
1222  l= i-1;
1223  vl = DC_COEFF(l);
1224  if(COMPATIBLE_FRAME(l))
1225  transform |= PL;
1226  }
1227  if(y){
1228  u= i-fragment_width;
1229  vu = DC_COEFF(u);
1230  if(COMPATIBLE_FRAME(u))
1231  transform |= PU;
1232  if(x){
1233  ul= i-fragment_width-1;
1234  vul = DC_COEFF(ul);
1235  if(COMPATIBLE_FRAME(ul))
1236  transform |= PUL;
1237  }
1238  if(x + 1 < fragment_width){
1239  ur= i-fragment_width+1;
1240  vur = DC_COEFF(ur);
1241  if(COMPATIBLE_FRAME(ur))
1242  transform |= PUR;
1243  }
1244  }
1245 
1246  if (transform == 0) {
1247 
1248  /* if there were no fragments to predict from, use last
1249  * DC saved */
1250  predicted_dc = last_dc[current_frame_type];
1251  } else {
1252 
1253  /* apply the appropriate predictor transform */
1254  predicted_dc =
1255  (predictor_transform[transform][0] * vul) +
1256  (predictor_transform[transform][1] * vu) +
1257  (predictor_transform[transform][2] * vur) +
1258  (predictor_transform[transform][3] * vl);
1259 
1260  predicted_dc /= 128;
1261 
1262  /* check for outranging on the [ul u l] and
1263  * [ul u ur l] predictors */
1264  if ((transform == 15) || (transform == 13)) {
1265  if (FFABS(predicted_dc - vu) > 128)
1266  predicted_dc = vu;
1267  else if (FFABS(predicted_dc - vl) > 128)
1268  predicted_dc = vl;
1269  else if (FFABS(predicted_dc - vul) > 128)
1270  predicted_dc = vul;
1271  }
1272  }
1273 
1274  /* at long last, apply the predictor */
1275  DC_COEFF(i) += predicted_dc;
1276  /* save the DC */
1277  last_dc[current_frame_type] = DC_COEFF(i);
1278  }
1279  }
1280  }
1281 }
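/* Worked example of the predictor (illustration only): with the left, up and
 * up-left neighbours available and compatible, transform = PUL|PU|PL = 13, so
 * the weights are {-104, 116, 0, 116}/128. For vul = 10, vu = 12, vl = 11:
 *
 *   predicted_dc = (-104*10 + 116*12 + 116*11) / 128 = 1628 / 128 = 12
 *
 * The outranging check then replaces the prediction with vu, vl or vul if it
 * differs from one of them by more than 128. */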
1282 
1283 static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
1284 {
1285  int x, y;
1286  int *bounding_values= s->bounding_values_array+127;
1287 
1288  int width = s->fragment_width[!!plane];
1289  int height = s->fragment_height[!!plane];
1290  int fragment = s->fragment_start [plane] + ystart * width;
1291  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1292  uint8_t *plane_data = s->current_frame.f->data [plane];
1293  if (!s->flipped_image) stride = -stride;
1294  plane_data += s->data_offset[plane] + 8*ystart*stride;
1295 
1296  for (y = ystart; y < yend; y++) {
1297 
1298  for (x = 0; x < width; x++) {
1299  /* This code basically just deblocks on the edges of coded blocks.
1300  * However, it has to be much more complicated because of the
1301  * braindamaged deblock ordering used in VP3/Theora. Order matters
1302  * because some pixels get filtered twice. */
1303  if( s->all_fragments[fragment].coding_method != MODE_COPY )
1304  {
1305  /* do not perform left edge filter for left columns frags */
1306  if (x > 0) {
1307  s->vp3dsp.h_loop_filter(
1308  plane_data + 8*x,
1309  stride, bounding_values);
1310  }
1311 
1312  /* do not perform top edge filter for top row fragments */
1313  if (y > 0) {
1314  s->vp3dsp.v_loop_filter(
1315  plane_data + 8*x,
1316  stride, bounding_values);
1317  }
1318 
1319  /* do not perform right edge filter for right column
1320  * fragments or if right fragment neighbor is also coded
1321  * in this frame (it will be filtered in next iteration) */
1322  if ((x < width - 1) &&
1323  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1324  s->vp3dsp.h_loop_filter(
1325  plane_data + 8*x + 8,
1326  stride, bounding_values);
1327  }
1328 
1329  /* do not perform bottom edge filter for bottom row
1330  * fragments or if bottom fragment neighbor is also coded
1331  * in this frame (it will be filtered in the next row) */
1332  if ((y < height - 1) &&
1333  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1334  s->vp3dsp.v_loop_filter(
1335  plane_data + 8*x + 8*stride,
1336  stride, bounding_values);
1337  }
1338  }
1339 
1340  fragment++;
1341  }
1342  plane_data += 8*stride;
1343  }
1344 }
1345 
1350 static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
1351  int plane, int inter, int16_t block[64])
1352 {
1353  int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1354  uint8_t *perm = s->idct_scantable;
1355  int i = 0;
1356 
1357  do {
1358  int token = *s->dct_tokens[plane][i];
1359  switch (token & 3) {
1360  case 0: // EOB
1361  if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
1362  s->dct_tokens[plane][i]++;
1363  else
1364  *s->dct_tokens[plane][i] = token & ~3;
1365  goto end;
1366  case 1: // zero run
1367  s->dct_tokens[plane][i]++;
1368  i += (token >> 2) & 0x7f;
1369  if (i > 63) {
1370  av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
1371  return i;
1372  }
1373  block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1374  i++;
1375  break;
1376  case 2: // coeff
1377  block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1378  s->dct_tokens[plane][i++]++;
1379  break;
1380  default: // shouldn't happen
1381  return i;
1382  }
1383  } while (i < 64);
1384  // return value is expected to be a valid level
1385  i--;
1386 end:
1387  // the actual DC+prediction is in the fragment structure
1388  block[0] = frag->dc * s->qmat[0][inter][plane][0];
1389  return i;
1390 }
1391 
1395 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
1396 {
1397  int h, cy, i;
1398  int offset[AV_NUM_DATA_POINTERS];
1399 
1400  if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1401  int y_flipped = s->flipped_image ? s->avctx->height-y : y;
1402 
1403  // At the end of the frame, report INT_MAX instead of the height of the frame.
1404  // This makes the other threads' ff_thread_await_progress() calls cheaper, because
1405  // they don't have to clip their values.
1406  ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0);
1407  }
1408 
1409  if(s->avctx->draw_horiz_band==NULL)
1410  return;
1411 
1412  h= y - s->last_slice_end;
1413  s->last_slice_end= y;
1414  y -= h;
1415 
1416  if (!s->flipped_image) {
1417  y = s->avctx->height - y - h;
1418  }
1419 
1420  cy = y >> s->chroma_y_shift;
1421  offset[0] = s->current_frame.f->linesize[0]*y;
1422  offset[1] = s->current_frame.f->linesize[1]*cy;
1423  offset[2] = s->current_frame.f->linesize[2]*cy;
1424  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1425  offset[i] = 0;
1426 
1427  emms_c();
1428  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1429 }
1430 
1435 static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
1436 {
1438  int ref_row;
1439  int border = motion_y&1;
1440 
1441  if (fragment->coding_method == MODE_USING_GOLDEN ||
1442  fragment->coding_method == MODE_GOLDEN_MV)
1443  ref_frame = &s->golden_frame;
1444  else
1445  ref_frame = &s->last_frame;
1446 
1447  ref_row = y + (motion_y>>1);
1448  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1449 
1450  ff_thread_await_progress(ref_frame, ref_row, 0);
1451 }
1452 
1453 /*
1454  * Perform the final rendering for a particular slice of data.
1455  * The slice number ranges from 0..(c_superblock_height - 1).
1456  */
1457 static void render_slice(Vp3DecodeContext *s, int slice)
1458 {
1459  int x, y, i, j, fragment;
1460  int16_t *block = s->block;
1461  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
1462  int motion_halfpel_index;
1463  uint8_t *motion_source;
1464  int plane, first_pixel;
1465 
1466  if (slice >= s->c_superblock_height)
1467  return;
1468 
1469  for (plane = 0; plane < 3; plane++) {
1470  uint8_t *output_plane = s->current_frame.f->data [plane] + s->data_offset[plane];
1471  uint8_t * last_plane = s-> last_frame.f->data [plane] + s->data_offset[plane];
1472  uint8_t *golden_plane = s-> golden_frame.f->data [plane] + s->data_offset[plane];
1473  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1474  int plane_width = s->width >> (plane && s->chroma_x_shift);
1475  int plane_height = s->height >> (plane && s->chroma_y_shift);
1476  int8_t (*motion_val)[2] = s->motion_val[!!plane];
1477 
1478  int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
1479  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
1480  int slice_width = plane ? s->c_superblock_width : s->y_superblock_width;
1481 
1482  int fragment_width = s->fragment_width[!!plane];
1483  int fragment_height = s->fragment_height[!!plane];
1484  int fragment_start = s->fragment_start[plane];
1485  int do_await = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
1486 
1487  if (!s->flipped_image) stride = -stride;
1488  if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
1489  continue;
1490 
1491  /* for each superblock row in the slice (both of them)... */
1492  for (; sb_y < slice_height; sb_y++) {
1493 
1494  /* for each superblock in a row... */
1495  for (sb_x = 0; sb_x < slice_width; sb_x++) {
1496 
1497  /* for each block in a superblock... */
1498  for (j = 0; j < 16; j++) {
1499  x = 4*sb_x + hilbert_offset[j][0];
1500  y = 4*sb_y + hilbert_offset[j][1];
1501  fragment = y*fragment_width + x;
1502 
1503  i = fragment_start + fragment;
1504 
1505  // bounds check
1506  if (x >= fragment_width || y >= fragment_height)
1507  continue;
1508 
1509  first_pixel = 8*y*stride + 8*x;
1510 
1511  if (do_await && s->all_fragments[i].coding_method != MODE_INTRA)
1512  await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift);
1513 
1514  /* transform if this block was coded */
1515  if (s->all_fragments[i].coding_method != MODE_COPY) {
1516  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
1517  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
1518  motion_source= golden_plane;
1519  else
1520  motion_source= last_plane;
1521 
1522  motion_source += first_pixel;
1523  motion_halfpel_index = 0;
1524 
1525  /* sort out the motion vector if this fragment is coded
1526  * using a motion vector method */
1527  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
1528  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
1529  int src_x, src_y;
1530  motion_x = motion_val[fragment][0];
1531  motion_y = motion_val[fragment][1];
1532 
1533  src_x= (motion_x>>1) + 8*x;
1534  src_y= (motion_y>>1) + 8*y;
1535 
1536  motion_halfpel_index = motion_x & 0x01;
1537  motion_source += (motion_x >> 1);
1538 
1539  motion_halfpel_index |= (motion_y & 0x01) << 1;
1540  motion_source += ((motion_y >> 1) * stride);
1541 
1542  if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
1543  uint8_t *temp= s->edge_emu_buffer;
1544  if(stride<0) temp -= 8*stride;
1545 
1546  s->vdsp.emulated_edge_mc(temp, motion_source,
1547  stride, stride,
1548  9, 9, src_x, src_y,
1549  plane_width,
1550  plane_height);
1551  motion_source= temp;
1552  }
1553  }
1554 
1555 
1556  /* first, take care of copying a block from either the
1557  * previous or the golden frame */
1558  if (s->all_fragments[i].coding_method != MODE_INTRA) {
1559  /* Note, it is possible to implement all MC cases with
1560  put_no_rnd_pixels_l2 which would look more like the
1561  VP3 source but this would be slower as
1562  put_no_rnd_pixels_tab is better optimized */
1563  if(motion_halfpel_index != 3){
1564  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
1565  output_plane + first_pixel,
1566  motion_source, stride, 8);
1567  }else{
1568  int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
1569  s->vp3dsp.put_no_rnd_pixels_l2(
1570  output_plane + first_pixel,
1571  motion_source - d,
1572  motion_source + stride + 1 + d,
1573  stride, 8);
1574  }
1575  }
1576 
1577  /* invert DCT and place (or add) in final output */
1578 
1579  if (s->all_fragments[i].coding_method == MODE_INTRA) {
1580  int index;
1581  index = vp3_dequant(s, s->all_fragments + i, plane, 0, block);
1582  if (index > 63)
1583  continue;
1584  s->vp3dsp.idct_put(
1585  output_plane + first_pixel,
1586  stride,
1587  block);
1588  } else {
1589  int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block);
1590  if (index > 63)
1591  continue;
1592  if (index > 0) {
1593  s->vp3dsp.idct_add(
1594  output_plane + first_pixel,
1595  stride,
1596  block);
1597  } else {
1598  s->vp3dsp.idct_dc_add(output_plane + first_pixel, stride, block);
1599  }
1600  }
1601  } else {
1602 
1603  /* copy directly from the previous frame */
1604  s->hdsp.put_pixels_tab[1][0](
1605  output_plane + first_pixel,
1606  last_plane + first_pixel,
1607  stride, 8);
1608 
1609  }
1610  }
1611  }
1612 
1613  // Filter up to the last row in the superblock row
1614  if (!s->skip_loop_filter)
1615  apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
1616  }
1617  }
1618 
1619  /* this looks like a good place for slice dispatch... */
1620  /* algorithm:
1621  * if (slice == s->macroblock_height - 1)
1622  * dispatch (both last slice & 2nd-to-last slice);
1623  * else if (slice > 0)
1624  * dispatch (slice - 1);
1625  */
1626 
1627  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16));
1628 }
1629 
1631 static av_cold int allocate_tables(AVCodecContext *avctx)
1632 {
1633  Vp3DecodeContext *s = avctx->priv_data;
1634  int y_fragment_count, c_fragment_count;
1635 
1636  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1637  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1638 
1641  s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
1642  s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
1643  s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
1644  s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));
1645 
1646  /* work out the block mapping tables */
1647  s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
1649 
1650  if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
1652  !s->motion_val[0] || !s->motion_val[1]) {
1653  vp3_decode_end(avctx);
1654  return -1;
1655  }
1656 
1657  init_block_mapping(s);
1658 
1659  return 0;
1660 }
1661 
1662 static av_cold int init_frames(Vp3DecodeContext *s)
1663 {
1664  s->current_frame.f = av_frame_alloc();
1665  s->last_frame.f = av_frame_alloc();
1666  s->golden_frame.f = av_frame_alloc();
1667 
1668  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f) {
1669  av_frame_free(&s->current_frame.f);
1670  av_frame_free(&s->last_frame.f);
1671  av_frame_free(&s->golden_frame.f);
1672  return AVERROR(ENOMEM);
1673  }
1674 
1675  return 0;
1676 }
1677 
1678 static av_cold int vp3_decode_init(AVCodecContext *avctx)
1679 {
1680  Vp3DecodeContext *s = avctx->priv_data;
1681  int i, inter, plane, ret;
1682  int c_width;
1683  int c_height;
1684  int y_fragment_count, c_fragment_count;
1685 
1686  ret = init_frames(s);
1687  if (ret < 0)
1688  return ret;
1689 
1690  avctx->internal->allocate_progress = 1;
1691 
1692  if (avctx->codec_tag == MKTAG('V','P','3','0'))
1693  s->version = 0;
1694  else
1695  s->version = 1;
1696 
1697  s->avctx = avctx;
1698  s->width = FFALIGN(avctx->width, 16);
1699  s->height = FFALIGN(avctx->height, 16);
1700  if (avctx->pix_fmt == AV_PIX_FMT_NONE)
1701  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1704  ff_videodsp_init(&s->vdsp, 8);
1705  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
1706 
1707  for (i = 0; i < 64; i++) {
1708 #define T(x) (x >> 3) | ((x & 7) << 3)
1709  s->idct_permutation[i] = T(i);
1710  s->idct_scantable[i] = T(ff_zigzag_direct[i]);
1711 #undef T
1712  }
1713 
1714  /* initialize to an impossible value which will force a recalculation
1715  * in the first frame decode */
1716  for (i = 0; i < 3; i++)
1717  s->qps[i] = -1;
1718 
1720  &s->chroma_y_shift);
1721 
1722  s->y_superblock_width = (s->width + 31) / 32;
1723  s->y_superblock_height = (s->height + 31) / 32;
1725 
1726  /* work out the dimensions for the C planes */
1727  c_width = s->width >> s->chroma_x_shift;
1728  c_height = s->height >> s->chroma_y_shift;
1729  s->c_superblock_width = (c_width + 31) / 32;
1730  s->c_superblock_height = (c_height + 31) / 32;
1732 
1736 
1737  s->macroblock_width = (s->width + 15) / 16;
1738  s->macroblock_height = (s->height + 15) / 16;
1740 
1741  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
1742  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
1743  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
1744  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
1745 
1746  /* fragment count covers all 8x8 blocks for all 3 planes */
1747  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1748  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1749  s->fragment_count = y_fragment_count + 2*c_fragment_count;
1750  s->fragment_start[1] = y_fragment_count;
1751  s->fragment_start[2] = y_fragment_count + c_fragment_count;
1752 
1753  if (!s->theora_tables)
1754  {
1755  for (i = 0; i < 64; i++) {
1756  s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
1757  s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
1758  s->base_matrix[0][i] = vp31_intra_y_dequant[i];
1759  s->base_matrix[1][i] = vp31_intra_c_dequant[i];
1760  s->base_matrix[2][i] = vp31_inter_dequant[i];
1761  s->filter_limit_values[i] = vp31_filter_limit_values[i];
1762  }
1763 
1764  for(inter=0; inter<2; inter++){
1765  for(plane=0; plane<3; plane++){
1766  s->qr_count[inter][plane]= 1;
1767  s->qr_size [inter][plane][0]= 63;
1768  s->qr_base [inter][plane][0]=
1769  s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
1770  }
1771  }
1772 
1773  /* init VLC tables */
1774  for (i = 0; i < 16; i++) {
1775 
1776  /* DC histograms */
1777  init_vlc(&s->dc_vlc[i], 11, 32,
1778  &dc_bias[i][0][1], 4, 2,
1779  &dc_bias[i][0][0], 4, 2, 0);
1780 
1781  /* group 1 AC histograms */
1782  init_vlc(&s->ac_vlc_1[i], 11, 32,
1783  &ac_bias_0[i][0][1], 4, 2,
1784  &ac_bias_0[i][0][0], 4, 2, 0);
1785 
1786  /* group 2 AC histograms */
1787  init_vlc(&s->ac_vlc_2[i], 11, 32,
1788  &ac_bias_1[i][0][1], 4, 2,
1789  &ac_bias_1[i][0][0], 4, 2, 0);
1790 
1791  /* group 3 AC histograms */
1792  init_vlc(&s->ac_vlc_3[i], 11, 32,
1793  &ac_bias_2[i][0][1], 4, 2,
1794  &ac_bias_2[i][0][0], 4, 2, 0);
1795 
1796  /* group 4 AC histograms */
1797  init_vlc(&s->ac_vlc_4[i], 11, 32,
1798  &ac_bias_3[i][0][1], 4, 2,
1799  &ac_bias_3[i][0][0], 4, 2, 0);
1800  }
1801  } else {
1802 
1803  for (i = 0; i < 16; i++) {
1804  /* DC histograms */
1805  if (init_vlc(&s->dc_vlc[i], 11, 32,
1806  &s->huffman_table[i][0][1], 8, 4,
1807  &s->huffman_table[i][0][0], 8, 4, 0) < 0)
1808  goto vlc_fail;
1809 
1810  /* group 1 AC histograms */
1811  if (init_vlc(&s->ac_vlc_1[i], 11, 32,
1812  &s->huffman_table[i+16][0][1], 8, 4,
1813  &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
1814  goto vlc_fail;
1815 
1816  /* group 2 AC histograms */
1817  if (init_vlc(&s->ac_vlc_2[i], 11, 32,
1818  &s->huffman_table[i+16*2][0][1], 8, 4,
1819  &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
1820  goto vlc_fail;
1821 
1822  /* group 3 AC histograms */
1823  if (init_vlc(&s->ac_vlc_3[i], 11, 32,
1824  &s->huffman_table[i+16*3][0][1], 8, 4,
1825  &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
1826  goto vlc_fail;
1827 
1828  /* group 4 AC histograms */
1829  if (init_vlc(&s->ac_vlc_4[i], 11, 32,
1830  &s->huffman_table[i+16*4][0][1], 8, 4,
1831  &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
1832  goto vlc_fail;
1833  }
1834  }
1835 
1836  init_vlc(&s->superblock_run_length_vlc, 6, 34,
1837  &superblock_run_length_vlc_table[0][1], 4, 2,
1838  &superblock_run_length_vlc_table[0][0], 4, 2, 0);
1839 
1840  init_vlc(&s->fragment_run_length_vlc, 5, 30,
1841  &fragment_run_length_vlc_table[0][1], 4, 2,
1842  &fragment_run_length_vlc_table[0][0], 4, 2, 0);
1843 
1844  init_vlc(&s->mode_code_vlc, 3, 8,
1845  &mode_code_vlc_table[0][1], 2, 1,
1846  &mode_code_vlc_table[0][0], 2, 1, 0);
1847 
1848  init_vlc(&s->motion_vector_vlc, 6, 63,
1849  &motion_vector_vlc_table[0][1], 2, 1,
1850  &motion_vector_vlc_table[0][0], 2, 1, 0);
1851 
1852  return allocate_tables(avctx);
1853 
1854 vlc_fail:
1855  av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
1856  return -1;
1857 }
1858 
1860 static int update_frames(AVCodecContext *avctx)
1861 {
1862  Vp3DecodeContext *s = avctx->priv_data;
1863  int ret = 0;
1864 
1865 
1866  /* shuffle frames (last = current) */
1867  ff_thread_release_buffer(avctx, &s->last_frame);
1868  ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
1869  if (ret < 0)
1870  goto fail;
1871 
1872  if (s->keyframe) {
1873  ff_thread_release_buffer(avctx, &s->golden_frame);
1874  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
1875  }
1876 
1877 fail:
1878  ff_thread_release_buffer(avctx, &s->current_frame);
1879  return ret;
1880 }
1881 
1883 {
1885  if (src->f->data[0])
1886  return ff_thread_ref_frame(dst, src);
1887  return 0;
1888 }
1889 
1890 static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src)
1891 {
1892  int ret;
1893  if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
1894  (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
1895  (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
1896  return ret;
1897  return 0;
1898 }
1899 
1900 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1901 {
1902  Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
1903  int qps_changed = 0, i, err;
1904 
1905 #define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
1906 
1907  if (!s1->current_frame.f->data[0]
1908  ||s->width != s1->width
1909  ||s->height!= s1->height) {
1910  if (s != s1)
1911  ref_frames(s, s1);
1912  return -1;
1913  }
1914 
1915  if (s != s1) {
1916  // init tables if the first frame hasn't been decoded
1917  if (!s->current_frame.f->data[0]) {
1918  int y_fragment_count, c_fragment_count;
1919  s->avctx = dst;
1920  err = allocate_tables(dst);
1921  if (err)
1922  return err;
1923  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
1924  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
1925  memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
1926  memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
1927  }
1928 
1929  // copy previous frame data
1930  if ((err = ref_frames(s, s1)) < 0)
1931  return err;
1932 
1933  s->keyframe = s1->keyframe;
1934 
1935  // copy qscale data if necessary
1936  for (i = 0; i < 3; i++) {
1937  if (s->qps[i] != s1->qps[1]) {
1938  qps_changed = 1;
1939  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
1940  }
1941  }
1942 
1943  if (s->qps[0] != s1->qps[0])
1944  memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));
1945 
1946  if (qps_changed)
1947  copy_fields(s, s1, qps, superblock_count);
1948 #undef copy_fields
1949  }
1950 
1951  return update_frames(dst);
1952 }
1953 
1954 static int vp3_decode_frame(AVCodecContext *avctx,
1955  void *data, int *got_frame,
1956  AVPacket *avpkt)
1957 {
1958  const uint8_t *buf = avpkt->data;
1959  int buf_size = avpkt->size;
1960  Vp3DecodeContext *s = avctx->priv_data;
1961  GetBitContext gb;
1962  int i, ret;
1963 
1964  init_get_bits(&gb, buf, buf_size * 8);
1965 
1966  if (s->theora && get_bits1(&gb))
1967  {
1968  av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
1969  return -1;
1970  }
1971 
1972  s->keyframe = !get_bits1(&gb);
1973  if (!s->theora)
1974  skip_bits(&gb, 1);
1975  for (i = 0; i < 3; i++)
1976  s->last_qps[i] = s->qps[i];
1977 
1978  s->nqps=0;
1979  do{
1980  s->qps[s->nqps++]= get_bits(&gb, 6);
1981  } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
1982  for (i = s->nqps; i < 3; i++)
1983  s->qps[i] = -1;
1984 
1985  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1986  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
1987  s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);
1988 
1989  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
1990  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);
1991 
1992  if (s->qps[0] != s->last_qps[0])
1993  init_loop_filter(s);
1994 
1995  for (i = 0; i < s->nqps; i++)
1996  // reinit all dequantizers if the first one changed, because
1997  // the DC of the first quantizer must be used for all matrices
1998  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
1999  init_dequantizer(s, i);
2000 
2001  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2002  return buf_size;
2003 
2004  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
2005  if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
2006  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2007  goto error;
2008  }
2009 
2010  if (!s->edge_emu_buffer)
2011  s->edge_emu_buffer = av_malloc(9*FFABS(s->current_frame.f->linesize[0]));
2012 
2013  if (s->keyframe) {
2014  if (!s->theora)
2015  {
2016  skip_bits(&gb, 4); /* width code */
2017  skip_bits(&gb, 4); /* height code */
2018  if (s->version)
2019  {
2020  s->version = get_bits(&gb, 5);
2021  if (avctx->frame_number == 0)
2022  av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
2023  }
2024  }
2025  if (s->version || s->theora)
2026  {
2027  if (get_bits1(&gb))
2028  av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
2029  skip_bits(&gb, 2); /* reserved? */
2030  }
2031  } else {
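 /* the stream starts on an inter frame: allocate a placeholder golden
  * frame so inter prediction has a valid reference, and reuse it as the
  * last frame as well */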
2032  if (!s->golden_frame.f->data[0]) {
2033  av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
2034 
2035  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2036  if (ff_thread_get_buffer(avctx, &s->golden_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
2037  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2038  goto error;
2039  }
2040  ff_thread_release_buffer(avctx, &s->last_frame);
2041  if ((ret = ff_thread_ref_frame(&s->last_frame, &s->golden_frame)) < 0)
2042  goto error;
2043  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2044  }
2045  }
2046 
2047  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2048  ff_thread_finish_setup(avctx);
2049 
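 /* parse the frame payload in bitstream order: coded-block flags,
  * macroblock coding modes, motion vectors, per-block qp indices and
  * finally the DCT coefficient tokens */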
2050  if (unpack_superblocks(s, &gb)){
2051  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2052  goto error;
2053  }
2054  if (unpack_modes(s, &gb)){
2055  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2056  goto error;
2057  }
2058  if (unpack_vectors(s, &gb)){
2059  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2060  goto error;
2061  }
2062  if (unpack_block_qpis(s, &gb)){
2063  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2064  goto error;
2065  }
2066  if (unpack_dct_coeffs(s, &gb)){
2067  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2068  goto error;
2069  }
2070 
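 /* pick the starting row of each plane: row 0 for streams that are already
  * flipped, otherwise the bottom row of the frame */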
2071  for (i = 0; i < 3; i++) {
2072  int height = s->height >> (i && s->chroma_y_shift);
2073  if (s->flipped_image)
2074  s->data_offset[i] = 0;
2075  else
2076  s->data_offset[i] = (height-1) * s->current_frame.f->linesize[i];
2077  }
2078 
2079  s->last_slice_end = 0;
2080  for (i = 0; i < s->c_superblock_height; i++)
2081  render_slice(s, i);
2082 
2083  // filter the last row
2084  for (i = 0; i < 3; i++) {
2085  int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
2086  apply_loop_filter(s, i, row, row+1);
2087  }
2088  vp3_draw_horiz_band(s, s->avctx->height);
2089 
2090  if ((ret = av_frame_ref(data, s->current_frame.f)) < 0)
2091  return ret;
2092  *got_frame = 1;
2093 
2094  if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME)) {
2095  ret = update_frames(avctx);
2096  if (ret < 0)
2097  return ret;
2098  }
2099 
2100  return buf_size;
2101 
2102 error:
2103  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2104 
2105  if (!HAVE_THREADS || !(s->avctx->active_thread_type&FF_THREAD_FRAME))
2106  av_frame_unref(s->current_frame.f);
2107 
2108  return -1;
2109 }
2110 
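/* Recursively read one Theora Huffman tree: a 1 bit introduces a leaf
 * carrying a 5-bit token for the current code (hbits/huff_code_size),
 * a 0 bit introduces an internal node whose two subtrees are parsed with
 * the code extended by 0 and 1 respectively. */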
2111 static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
2112 {
2113  Vp3DecodeContext *s = avctx->priv_data;
2114 
2115  if (get_bits1(gb)) {
2116  int token;
2117  if (s->entries >= 32) { /* overflow */
2118  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2119  return -1;
2120  }
2121  token = get_bits(gb, 5);
2122  av_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
2123  s->hti, s->hbits, token, s->entries, s->huff_code_size);
2124  s->huffman_table[s->hti][token][0] = s->hbits;
2125  s->huffman_table[s->hti][token][1] = s->huff_code_size;
2126  s->entries++;
2127  }
2128  else {
2129  if (s->huff_code_size >= 32) {/* overflow */
2130  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2131  return -1;
2132  }
2133  s->huff_code_size++;
2134  s->hbits <<= 1;
2135  if (read_huffman_tree(avctx, gb))
2136  return -1;
2137  s->hbits |= 1;
2138  if (read_huffman_tree(avctx, gb))
2139  return -1;
2140  s->hbits >>= 1;
2141  s->huff_code_size--;
2142  }
2143  return 0;
2144 }
2145 
2146 static int vp3_init_thread_copy(AVCodecContext *avctx)
2147 {
2148  Vp3DecodeContext *s = avctx->priv_data;
2149 
2150  s->superblock_coding = NULL;
2151  s->all_fragments = NULL;
2152  s->coded_fragment_list[0] = NULL;
2153  s->dct_tokens_base = NULL;
2154  s->superblock_fragments = NULL;
2155  s->macroblock_coding = NULL;
2156  s->motion_val[0] = NULL;
2157  s->motion_val[1] = NULL;
2158  s->edge_emu_buffer = NULL;
2159 
2160  return init_frames(s);
2161 }
2162 
2163 #if CONFIG_THEORA_DECODER
2164 static const enum AVPixelFormat theora_pix_fmts[4] = {
2165  AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
2166 };
2167 
2168 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2169 {
2170  Vp3DecodeContext *s = avctx->priv_data;
2171  int visible_width, visible_height, colorspace;
2172  int offset_x = 0, offset_y = 0;
2173  int ret;
2174  AVRational fps, aspect;
2175 
2176  s->theora = get_bits_long(gb, 24);
2177  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2178 
2179  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
2180  /* but previous versions have the image flipped relative to vp3 */
2181  if (s->theora < 0x030200)
2182  {
2183  s->flipped_image = 1;
2184  av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
2185  }
2186 
2187  visible_width = s->width = get_bits(gb, 16) << 4;
2188  visible_height = s->height = get_bits(gb, 16) << 4;
2189 
2190  if (s->theora >= 0x030200) {
2191  visible_width = get_bits_long(gb, 24);
2192  visible_height = get_bits_long(gb, 24);
2193 
2194  offset_x = get_bits(gb, 8); /* offset x */
2195  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2196  }
2197 
2198  fps.num = get_bits_long(gb, 32);
2199  fps.den = get_bits_long(gb, 32);
2200  if (fps.num && fps.den) {
2201  if (fps.num < 0 || fps.den < 0) {
2202  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2203  return AVERROR_INVALIDDATA;
2204  }
2205  av_reduce(&avctx->time_base.num, &avctx->time_base.den,
2206  fps.den, fps.num, 1<<30);
2207  }
2208 
2209  aspect.num = get_bits_long(gb, 24);
2210  aspect.den = get_bits_long(gb, 24);
2211  if (aspect.num && aspect.den) {
2212  av_reduce(&avctx->sample_aspect_ratio.num,
2213  &avctx->sample_aspect_ratio.den,
2214  aspect.num, aspect.den, 1<<30);
2215  }
2216 
2217  if (s->theora < 0x030200)
2218  skip_bits(gb, 5); /* keyframe frequency force */
2219  colorspace = get_bits(gb, 8);
2220  skip_bits(gb, 24); /* bitrate */
2221 
2222  skip_bits(gb, 6); /* quality hint */
2223 
2224  if (s->theora >= 0x030200)
2225  {
2226  skip_bits(gb, 5); /* keyframe frequency force */
2227  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2228  skip_bits(gb, 3); /* reserved */
2229  }
2230 
2231 // align_get_bits(gb);
2232 
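 /* export the cropped (visible) dimensions only when they are within one
  * macroblock of the coded size and the crop keeps the top-left corner
  * (offset_y is counted from the bottom of the coded frame) */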
2233  if ( visible_width <= s->width && visible_width > s->width-16
2234  && visible_height <= s->height && visible_height > s->height-16
2235  && !offset_x && (offset_y == s->height - visible_height))
2236  ret = ff_set_dimensions(avctx, visible_width, visible_height);
2237  else
2238  ret = ff_set_dimensions(avctx, s->width, s->height);
2239  if (ret < 0)
2240  return ret;
2241 
2242  if (colorspace == 1) {
2243  avctx->color_primaries = AVCOL_PRI_BT470M;
2244  } else if (colorspace == 2) {
2245  avctx->color_primaries = AVCOL_PRI_BT470BG;
2246  }
2247  if (colorspace == 1 || colorspace == 2) {
2248  avctx->colorspace = AVCOL_SPC_BT470BG;
2249  avctx->color_trc = AVCOL_TRC_BT709;
2250  }
2251 
2252  return 0;
2253 }
2254 
2255 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2256 {
2257  Vp3DecodeContext *s = avctx->priv_data;
2258  int i, n, matrices, inter, plane;
2259 
2260  if (s->theora >= 0x030200) {
2261  n = get_bits(gb, 3);
2262  /* loop filter limit values table */
2263  if (n)
2264  for (i = 0; i < 64; i++)
2265  s->filter_limit_values[i] = get_bits(gb, n);
2266  }
2267 
2268  if (s->theora >= 0x030200)
2269  n = get_bits(gb, 4) + 1;
2270  else
2271  n = 16;
2272  /* quality threshold table */
2273  for (i = 0; i < 64; i++)
2274  s->coded_ac_scale_factor[i] = get_bits(gb, n);
2275 
2276  if (s->theora >= 0x030200)
2277  n = get_bits(gb, 4) + 1;
2278  else
2279  n = 16;
2280  /* dc scale factor table */
2281  for (i = 0; i < 64; i++)
2282  s->coded_dc_scale_factor[i] = get_bits(gb, n);
2283 
2284  if (s->theora >= 0x030200)
2285  matrices = get_bits(gb, 9) + 1;
2286  else
2287  matrices = 3;
2288 
2289  if(matrices > 384){
2290  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
2291  return -1;
2292  }
2293 
2294  for(n=0; n<matrices; n++){
2295  for (i = 0; i < 64; i++)
2296  s->base_matrix[n][i]= get_bits(gb, 8);
2297  }
2298 
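 /* for every (inter, plane) pair either reuse the quant ranges of a
  * previously parsed pair, or read a new list of (base matrix index,
  * run length) pairs that must cover qi 0..63 exactly */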
2299  for (inter = 0; inter <= 1; inter++) {
2300  for (plane = 0; plane <= 2; plane++) {
2301  int newqr= 1;
2302  if (inter || plane > 0)
2303  newqr = get_bits1(gb);
2304  if (!newqr) {
2305  int qtj, plj;
2306  if(inter && get_bits1(gb)){
2307  qtj = 0;
2308  plj = plane;
2309  }else{
2310  qtj= (3*inter + plane - 1) / 3;
2311  plj= (plane + 2) % 3;
2312  }
2313  s->qr_count[inter][plane]= s->qr_count[qtj][plj];
2314  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
2315  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
2316  } else {
2317  int qri= 0;
2318  int qi = 0;
2319 
2320  for(;;){
2321  i= get_bits(gb, av_log2(matrices-1)+1);
2322  if(i>= matrices){
2323  av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
2324  return -1;
2325  }
2326  s->qr_base[inter][plane][qri]= i;
2327  if(qi >= 63)
2328  break;
2329  i = get_bits(gb, av_log2(63-qi)+1) + 1;
2330  s->qr_size[inter][plane][qri++]= i;
2331  qi += i;
2332  }
2333 
2334  if (qi > 63) {
2335  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
2336  return -1;
2337  }
2338  s->qr_count[inter][plane]= qri;
2339  }
2340  }
2341  }
2342 
2343  /* Huffman tables */
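 /* 80 code trees in total; each tree whose leading bit marks the root as
  * an internal node is built recursively via read_huffman_tree() */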
2344  for (s->hti = 0; s->hti < 80; s->hti++) {
2345  s->entries = 0;
2346  s->huff_code_size = 1;
2347  if (!get_bits1(gb)) {
2348  s->hbits = 0;
2349  if(read_huffman_tree(avctx, gb))
2350  return -1;
2351  s->hbits = 1;
2352  if(read_huffman_tree(avctx, gb))
2353  return -1;
2354  }
2355  }
2356 
2357  s->theora_tables = 1;
2358 
2359  return 0;
2360 }
2361 
2362 static av_cold int theora_decode_init(AVCodecContext *avctx)
2363 {
2364  Vp3DecodeContext *s = avctx->priv_data;
2365  GetBitContext gb;
2366  int ptype;
2367  uint8_t *header_start[3];
2368  int header_len[3];
2369  int i;
2370 
2371  s->theora = 1;
2372 
2373  if (!avctx->extradata_size)
2374  {
2375  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
2376  return -1;
2377  }
2378 
2379  if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
2380  42, header_start, header_len) < 0) {
2381  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
2382  return -1;
2383  }
2384 
2385  for(i=0;i<3;i++) {
2386  if (header_len[i] <= 0)
2387  continue;
2388  init_get_bits(&gb, header_start[i], header_len[i] * 8);
2389 
2390  ptype = get_bits(&gb, 8);
2391 
2392  if (!(ptype & 0x80))
2393  {
2394  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
2395 // return -1;
2396  }
2397 
2398  // FIXME: Check for this as well.
2399  skip_bits_long(&gb, 6*8); /* "theora" */
2400 
2401  switch(ptype)
2402  {
2403  case 0x80:
2404  theora_decode_header(avctx, &gb);
2405  break;
2406  case 0x81:
2407 // FIXME: is this needed? it breaks sometimes
2408 // theora_decode_comments(avctx, gb);
2409  break;
2410  case 0x82:
2411  if (theora_decode_tables(avctx, &gb))
2412  return -1;
2413  break;
2414  default:
2415  av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
2416  break;
2417  }
2418  if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
2419  av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
2420  if (s->theora < 0x030200)
2421  break;
2422  }
2423 
2424  return vp3_decode_init(avctx);
2425 }
2426 
2427 AVCodec ff_theora_decoder = {
2428  .name = "theora",
2429  .long_name = NULL_IF_CONFIG_SMALL("Theora"),
2430  .type = AVMEDIA_TYPE_VIDEO,
2431  .id = AV_CODEC_ID_THEORA,
2432  .priv_data_size = sizeof(Vp3DecodeContext),
2433  .init = theora_decode_init,
2434  .close = vp3_decode_end,
2435  .decode = vp3_decode_frame,
2436  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
2437  CODEC_CAP_FRAME_THREADS,
2438  .flush = vp3_decode_flush,
2439  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
2440  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
2441 };
2442 #endif
2443 
2444 AVCodec ff_vp3_decoder = {
2445  .name = "vp3",
2446  .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
2447  .type = AVMEDIA_TYPE_VIDEO,
2448  .id = AV_CODEC_ID_VP3,
2449  .priv_data_size = sizeof(Vp3DecodeContext),
2450  .init = vp3_decode_init,
2451  .close = vp3_decode_end,
2452  .decode = vp3_decode_frame,
2453  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
2454  CODEC_CAP_FRAME_THREADS,
2455  .flush = vp3_decode_flush,
2456  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
2457  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
2458 };