Libav
vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpeg_er.h"
33 #include "mpegutils.h"
34 #include "mpegvideo.h"
35 #include "h263.h"
36 #include "h264chroma.h"
37 #include "qpeldsp.h"
38 #include "vc1.h"
39 #include "vc1data.h"
40 #include "vc1acdata.h"
41 #include "msmpeg4data.h"
42 #include "unary.h"
43 #include "mathops.h"
44 
45 #undef NDEBUG
46 #include <assert.h>
47 
48 #define MB_INTRA_VLC_BITS 9
49 #define DC_VLC_BITS 9
50 
51 
52 // offset tables for interlaced picture MVDATA decoding
53 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
54 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
55 
56 /***********************************************************************/
67 enum Imode {
75 }; //imode defines
77 
79 {
 /* NOTE(review): Doxygen listing — the function signature (orig. line 78)
  * and orig. line 81 are absent from this dump; nothing about them is
  * inferred here. */
80  MpegEncContext *s = &v->s;
 /* Advance all three plane output pointers by one picture line. This is
  * done only in field mode when !(second_field ^ tff) — presumably so the
  * current field lands on the correct interleaved line of the frame
  * buffer; TODO(review): confirm against the field-ordering logic. */
82  if (v->field_mode && !(v->second_field ^ v->tff)) {
83  s->dest[0] += s->current_picture_ptr->f->linesize[0];
84  s->dest[1] += s->current_picture_ptr->f->linesize[1];
85  s->dest[2] += s->current_picture_ptr->f->linesize[2];
86  }
87 }
88  //Bitplane group
90 
92 {
 /* NOTE(review): signature (orig. line 91) and the head lines of the
  * put-pixels calls (orig. 112, 115, 118, 121, 124, 127, 137, 140, 143,
  * 146, 149, 152, and 163-167) are missing from this Doxygen dump; only
  * their argument lines remain below. */
93  MpegEncContext *s = &v->s;
94  int topleft_mb_pos, top_mb_pos;
95  int stride_y, fieldtx = 0;
96  int v_dist;
97 
98  /* The put pixels loop is always one MB row behind the decoding loop,
99  * because we can only put pixels when overlap filtering is done, and
100  * for filtering of the bottom edge of a MB, we need the next MB row
101  * present as well.
102  * Within the row, the put pixels loop is also one MB col behind the
103  * decoding loop. The reason for this is again, because for filtering
104  * of the right MB edge, we need the next MB present. */
105  if (!s->first_slice_line) {
 /* Flush the delayed top-left macroblock once its right/bottom
  * neighbours exist. */
106  if (s->mb_x) {
107  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
 /* For interlaced frames a per-MB field-transform flag doubles the
  * luma stride so the two fields are written interleaved. */
108  if (v->fcm == ILACE_FRAME)
109  fieldtx = v->fieldtx_plane[topleft_mb_pos];
110  stride_y = s->linesize << fieldtx;
111  v_dist = (16 - fieldtx) >> (fieldtx == 0);
113  s->dest[0] - 16 * s->linesize - 16,
114  stride_y);
116  s->dest[0] - 16 * s->linesize - 8,
117  stride_y);
119  s->dest[0] - v_dist * s->linesize - 16,
120  stride_y);
122  s->dest[0] - v_dist * s->linesize - 8,
123  stride_y);
125  s->dest[1] - 8 * s->uvlinesize - 8,
126  s->uvlinesize);
128  s->dest[2] - 8 * s->uvlinesize - 8,
129  s->uvlinesize);
130  }
 /* Last MB of the row: also flush the MB directly above (it has no
  * right neighbour to wait for). */
131  if (s->mb_x == s->mb_width - 1) {
132  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
133  if (v->fcm == ILACE_FRAME)
134  fieldtx = v->fieldtx_plane[top_mb_pos];
135  stride_y = s->linesize << fieldtx;
136  v_dist = fieldtx ? 15 : 8;
138  s->dest[0] - 16 * s->linesize,
139  stride_y);
141  s->dest[0] - 16 * s->linesize + 8,
142  stride_y);
144  s->dest[0] - v_dist * s->linesize,
145  stride_y);
147  s->dest[0] - v_dist * s->linesize + 8,
148  stride_y);
150  s->dest[1] - 8 * s->uvlinesize,
151  s->uvlinesize);
153  s->dest[2] - 8 * s->uvlinesize,
154  s->uvlinesize);
155  }
156  }
157 
 /* Advance a block-buffer index with wrap-around at n_allocated_blks. */
158 #define inc_blk_idx(idx) do { \
159  idx++; \
160  if (idx >= v->n_allocated_blks) \
161  idx = 0; \
162  } while (0)
163 
168 }
169 
170 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
171 {
172  MpegEncContext *s = &v->s;
173  int j;
174  if (!s->first_slice_line) {
175  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
176  if (s->mb_x)
177  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
178  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
179  for (j = 0; j < 2; j++) {
180  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
181  if (s->mb_x)
182  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
183  }
184  }
185  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
186 
187  if (s->mb_y == s->end_mb_y - 1) {
188  if (s->mb_x) {
189  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
190  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
191  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
192  }
193  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
194  }
195 }
196 
198 {
 /* NOTE(review): the function signature (orig. line 197) is missing from
  * this Doxygen dump; this is the delayed variant of the intra-MB loop
  * filter (same VC1Context/pq arguments as vc1_loop_filter_iblk above,
  * judging by the body — confirm against the original file). */
199  MpegEncContext *s = &v->s;
200  int j;
201 
202  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
203  * means it runs two rows/cols behind the decoding loop. */
204  if (!s->first_slice_line) {
205  if (s->mb_x) {
 /* filter the MB two rows up / one col left, now fully overlapped */
206  if (s->mb_y >= s->start_mb_y + 2) {
207  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
208 
209  if (s->mb_x >= 2)
210  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
211  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
212  for (j = 0; j < 2; j++) {
213  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
214  if (s->mb_x >= 2) {
215  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
216  }
217  }
218  }
219  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
220  }
221 
 /* right-most MB column: also catch up on the column itself */
222  if (s->mb_x == s->mb_width - 1) {
223  if (s->mb_y >= s->start_mb_y + 2) {
224  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
225 
226  if (s->mb_x)
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
229  for (j = 0; j < 2; j++) {
 /* NOTE(review): chroma guard here is `mb_x >= 2` while the luma
  * guard above is `mb_x` — looks intentional (chroma is half
  * width) but worth confirming. */
230  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
231  if (s->mb_x >= 2) {
232  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
233  }
234  }
235  }
236  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
237  }
238 
 /* last row: flush the remaining horizontal edges */
239  if (s->mb_y == s->end_mb_y) {
240  if (s->mb_x) {
241  if (s->mb_x >= 2)
242  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
243  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
244  if (s->mb_x >= 2) {
245  for (j = 0; j < 2; j++) {
246  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
247  }
248  }
249  }
250 
251  if (s->mb_x == s->mb_width - 1) {
252  if (s->mb_x)
253  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
254  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
255  if (s->mb_x) {
256  for (j = 0; j < 2; j++) {
257  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
258  }
259  }
260  }
261  }
262  }
263 }
264 
266 {
 /* NOTE(review): the signature (orig. line 265) and the head lines of the
  * overlap-filter calls (orig. 285, 287, 290, 292, 296, 298, 304, 306,
  * 309, 311, 315, 317, 324, 326, 329, 331, 335, 337) are missing from
  * this Doxygen dump; only their second argument lines remain. */
267  MpegEncContext *s = &v->s;
268  int mb_pos;
269 
270  if (v->condover == CONDOVER_NONE)
271  return;
272 
273  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
274 
275  /* Within a MB, the horizontal overlap always runs before the vertical.
276  * To accomplish that, we run the H on left and internal borders of the
277  * currently decoded MB. Then, we wait for the next overlap iteration
278  * to do H overlap on the right edge of this MB, before moving over and
279  * running the V overlap. Therefore, the V overlap makes us trail by one
280  * MB col and the H overlap filter makes us trail by one MB row. This
281  * is reflected in the time at which we run the put_pixels loop. */
282  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
283  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
284  v->over_flags_plane[mb_pos - 1])) {
286  v->block[v->cur_blk_idx][0]);
288  v->block[v->cur_blk_idx][2]);
289  if (!(s->flags & CODEC_FLAG_GRAY)) {
291  v->block[v->cur_blk_idx][4]);
293  v->block[v->cur_blk_idx][5]);
294  }
295  }
297  v->block[v->cur_blk_idx][1]);
299  v->block[v->cur_blk_idx][3]);
300 
301  if (s->mb_x == s->mb_width - 1) {
302  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
303  v->over_flags_plane[mb_pos - s->mb_stride])) {
305  v->block[v->cur_blk_idx][0]);
307  v->block[v->cur_blk_idx][1]);
308  if (!(s->flags & CODEC_FLAG_GRAY)) {
310  v->block[v->cur_blk_idx][4]);
312  v->block[v->cur_blk_idx][5]);
313  }
314  }
316  v->block[v->cur_blk_idx][2]);
318  v->block[v->cur_blk_idx][3]);
319  }
320  }
 /* NOTE(review): unlike the three parallel conditions above, this one
  * omits the `v->pq >= 9` term — verify this is intentional and not a
  * dropped clause. */
321  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
322  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
323  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
325  v->block[v->left_blk_idx][0]);
327  v->block[v->left_blk_idx][1]);
328  if (!(s->flags & CODEC_FLAG_GRAY)) {
330  v->block[v->left_blk_idx][4]);
332  v->block[v->left_blk_idx][5]);
333  }
334  }
336  v->block[v->left_blk_idx][2]);
338  v->block[v->left_blk_idx][3]);
339  }
340 }
341 
 /* Motion compensation for a whole 16x16 macroblock with a single MV.
  * dir: 0 = forward (last/current picture), 1 = backward (next picture).
  * NOTE(review): Doxygen dump — orig. line 458 (the head of the first
  * s->vdsp.emulated_edge_mc(...) call for luma) is missing below. */
345 static void vc1_mc_1mv(VC1Context *v, int dir)
346 {
347  MpegEncContext *s = &v->s;
348  H264ChromaContext *h264chroma = &v->h264chroma;
349  uint8_t *srcY, *srcU, *srcV;
350  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
351  int v_edge_pos = s->v_edge_pos >> v->field_mode;
352  int i;
353  uint8_t (*luty)[256], (*lutuv)[256];
354  int use_ic;
355 
 /* bail out when the needed reference picture is not available */
356  if ((!v->field_mode ||
357  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
358  !v->s.last_picture.f->data[0])
359  return;
360 
361  mx = s->mv[dir][0][0];
362  my = s->mv[dir][0][1];
363 
364  // store motion vectors for further use in B frames
365  if (s->pict_type == AV_PICTURE_TYPE_P) {
366  for (i = 0; i < 4; i++) {
367  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
368  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
369  }
370  }
371 
 /* derive the chroma MV from the luma MV (halved, odd 1/4-pel values
  * rounded up via the (mx & 3) == 3 term) */
372  uvmx = (mx + ((mx & 3) == 3)) >> 1;
373  uvmy = (my + ((my & 3) == 3)) >> 1;
374  v->luma_mv[s->mb_x][0] = uvmx;
375  v->luma_mv[s->mb_x][1] = uvmy;
376 
 /* cross-field reference: shift the vertical MV by +/-2 depending on
  * the current field parity */
377  if (v->field_mode &&
378  v->cur_field_type != v->ref_field_type[dir]) {
379  my = my - 2 + 4 * v->cur_field_type;
380  uvmy = uvmy - 2 + 4 * v->cur_field_type;
381  }
382 
383  // fastuvmc shall be ignored for interlaced frame picture
384  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
385  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
386  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
387  }
 /* select reference planes plus the matching IC look-up tables */
388  if (!dir) {
389  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
390  srcY = s->current_picture.f->data[0];
391  srcU = s->current_picture.f->data[1];
392  srcV = s->current_picture.f->data[2];
393  luty = v->curr_luty;
394  lutuv = v->curr_lutuv;
395  use_ic = v->curr_use_ic;
396  } else {
397  srcY = s->last_picture.f->data[0];
398  srcU = s->last_picture.f->data[1];
399  srcV = s->last_picture.f->data[2];
400  luty = v->last_luty;
401  lutuv = v->last_lutuv;
402  use_ic = v->last_use_ic;
403  }
404  } else {
405  srcY = s->next_picture.f->data[0];
406  srcU = s->next_picture.f->data[1];
407  srcV = s->next_picture.f->data[2];
408  luty = v->next_luty;
409  lutuv = v->next_lutuv;
410  use_ic = v->next_use_ic;
411  }
412 
413  if (!srcY || !srcU) {
414  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
415  return;
416  }
417 
 /* integer-pel source position (MVs are in 1/4-pel units) */
418  src_x = s->mb_x * 16 + (mx >> 2);
419  src_y = s->mb_y * 16 + (my >> 2);
420  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
421  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
422 
423  if (v->profile != PROFILE_ADVANCED) {
424  src_x = av_clip( src_x, -16, s->mb_width * 16);
425  src_y = av_clip( src_y, -16, s->mb_height * 16);
426  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
427  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
428  } else {
429  src_x = av_clip( src_x, -17, s->avctx->coded_width);
430  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
431  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
432  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
433  }
434 
435  srcY += src_y * s->linesize + src_x;
436  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
437  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
438 
 /* bottom-field reference starts one line into the frame buffer */
439  if (v->field_mode && v->ref_field_type[dir]) {
440  srcY += s->current_picture_ptr->f->linesize[0];
441  srcU += s->current_picture_ptr->f->linesize[1];
442  srcV += s->current_picture_ptr->f->linesize[2];
443  }
444 
445  /* for grayscale we should not try to read from unknown area */
446  if (s->flags & CODEC_FLAG_GRAY) {
447  srcU = s->edge_emu_buffer + 18 * s->linesize;
448  srcV = s->edge_emu_buffer + 18 * s->linesize;
449  }
450 
 /* slow path: edge emulation needed, or the pixels must be rewritten
  * for range reduction / intensity compensation */
451  if (v->rangeredfrm || use_ic
452  || s->h_edge_pos < 22 || v_edge_pos < 22
453  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
454  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
455  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
456 
457  srcY -= s->mspel * (1 + s->linesize);
459  s->linesize, s->linesize,
460  17 + s->mspel * 2, 17 + s->mspel * 2,
461  src_x - s->mspel, src_y - s->mspel,
462  s->h_edge_pos, v_edge_pos);
463  srcY = s->edge_emu_buffer;
464  s->vdsp.emulated_edge_mc(uvbuf, srcU,
465  s->uvlinesize, s->uvlinesize,
466  8 + 1, 8 + 1,
467  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
468  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
469  s->uvlinesize, s->uvlinesize,
470  8 + 1, 8 + 1,
471  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
472  srcU = uvbuf;
473  srcV = uvbuf + 16;
474  /* if we deal with range reduction we need to scale source blocks */
475  if (v->rangeredfrm) {
476  int i, j;
477  uint8_t *src, *src2;
478 
479  src = srcY;
480  for (j = 0; j < 17 + s->mspel * 2; j++) {
481  for (i = 0; i < 17 + s->mspel * 2; i++)
482  src[i] = ((src[i] - 128) >> 1) + 128;
483  src += s->linesize;
484  }
485  src = srcU;
486  src2 = srcV;
487  for (j = 0; j < 9; j++) {
488  for (i = 0; i < 9; i++) {
489  src[i] = ((src[i] - 128) >> 1) + 128;
490  src2[i] = ((src2[i] - 128) >> 1) + 128;
491  }
492  src += s->uvlinesize;
493  src2 += s->uvlinesize;
494  }
495  }
496  /* if we deal with intensity compensation we need to scale source blocks */
497  if (use_ic) {
498  int i, j;
499  uint8_t *src, *src2;
500 
501  src = srcY;
502  for (j = 0; j < 17 + s->mspel * 2; j++) {
 /* f selects the per-field LUT: reference field in field mode,
  * else the parity of the source line */
503  int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
504  for (i = 0; i < 17 + s->mspel * 2; i++)
505  src[i] = luty[f][src[i]];
506  src += s->linesize;
507  }
508  src = srcU;
509  src2 = srcV;
510  for (j = 0; j < 9; j++) {
511  int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
512  for (i = 0; i < 9; i++) {
513  src[i] = lutuv[f][src[i]];
514  src2[i] = lutuv[f][src2[i]];
515  }
516  src += s->uvlinesize;
517  src2 += s->uvlinesize;
518  }
519  }
520  srcY += s->mspel * (1 + s->linesize);
521  }
522 
 /* luma: 1/4-pel (mspel) interpolation, otherwise half-pel */
523  if (s->mspel) {
524  dxy = ((my & 3) << 2) | (mx & 3);
525  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
526  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
527  srcY += s->linesize * 8;
528  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
529  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
530  } else { // hpel mc - always used for luma
531  dxy = (my & 2) | ((mx & 2) >> 1);
532  if (!v->rnd)
533  s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
534  else
535  s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
536  }
537 
538  if (s->flags & CODEC_FLAG_GRAY) return;
539  /* Chroma MC always uses qpel bilinear */
540  uvmx = (uvmx & 3) << 1;
541  uvmy = (uvmy & 3) << 1;
542  if (!v->rnd) {
543  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
544  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
545  } else {
546  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
547  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
548  }
549 }
550 
/**
 * Median of four ints: the mean of the two middle values (the smaller of
 * each pair's maxima plus the larger of the minima, halved with C integer
 * division).
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d)
            return ((b < d ? b : d) + (a > c ? a : c)) / 2;
        return ((b < c ? b : c) + (a > d ? a : d)) / 2;
    }
    if (c < d)
        return ((a < d ? a : d) + (b > c ? b : c)) / 2;
    return ((a < c ? a : c) + (b > d ? b : d)) / 2;
}
561 
 /* Motion compensation for one 8x8 luma block (n = 0..3) in 4-MV mode.
  * dir selects forward/backward reference, avg selects averaging vs put.
  * NOTE(review): Doxygen dump — orig. line 704 (the head of the
  * s->vdsp.emulated_edge_mc(...) call) is missing below. */
564 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
565 {
566  MpegEncContext *s = &v->s;
567  uint8_t *srcY;
568  int dxy, mx, my, src_x, src_y;
569  int off;
 /* fieldmv: this block uses field (not frame) MVs in interlaced frames */
570  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
571  int v_edge_pos = s->v_edge_pos >> v->field_mode;
572  uint8_t (*luty)[256];
573  int use_ic;
574 
575  if ((!v->field_mode ||
576  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
577  !v->s.last_picture.f->data[0])
578  return;
579 
580  mx = s->mv[dir][n][0];
581  my = s->mv[dir][n][1];
582 
 /* pick reference plane + intensity-compensation LUT, as in vc1_mc_1mv */
583  if (!dir) {
584  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
585  srcY = s->current_picture.f->data[0];
586  luty = v->curr_luty;
587  use_ic = v->curr_use_ic;
588  } else {
589  srcY = s->last_picture.f->data[0];
590  luty = v->last_luty;
591  use_ic = v->last_use_ic;
592  }
593  } else {
594  srcY = s->next_picture.f->data[0];
595  luty = v->next_luty;
596  use_ic = v->next_use_ic;
597  }
598 
599  if (!srcY) {
600  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
601  return;
602  }
603 
604  if (v->field_mode) {
605  if (v->cur_field_type != v->ref_field_type[dir])
606  my = my - 2 + 4 * v->cur_field_type;
607  }
608 
 /* after the last of the four blocks, derive the MV stored for B-frame
  * prediction from the dominant field's MVs (median/mid/average by count) */
609  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
610  int same_count = 0, opp_count = 0, k;
611  int chosen_mv[2][4][2], f;
612  int tx, ty;
613  for (k = 0; k < 4; k++) {
614  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
615  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
616  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
617  opp_count += f;
618  same_count += 1 - f;
619  }
620  f = opp_count > same_count;
621  switch (f ? opp_count : same_count) {
622  case 4:
623  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
624  chosen_mv[f][2][0], chosen_mv[f][3][0]);
625  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
626  chosen_mv[f][2][1], chosen_mv[f][3][1]);
627  break;
628  case 3:
629  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
630  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
631  break;
632  case 2:
633  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
634  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
635  break;
636  }
 /* NOTE(review): the dominant count is always >= 2 here, so tx/ty are
  * set on every reachable path despite the missing default case. */
637  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
638  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
639  for (k = 0; k < 4; k++)
640  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
641  }
642 
643  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
644  int qx, qy;
645  int width = s->avctx->coded_width;
646  int height = s->avctx->coded_height >> 1;
647  if (s->pict_type == AV_PICTURE_TYPE_P) {
648  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
649  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
650  }
 /* MV pull-back so the reference block stays inside the padded frame */
651  qx = (s->mb_x * 16) + (mx >> 2);
652  qy = (s->mb_y * 8) + (my >> 3);
653 
654  if (qx < -17)
655  mx -= 4 * (qx + 17);
656  else if (qx > width)
657  mx -= 4 * (qx - width);
658  if (qy < -18)
659  my -= 8 * (qy + 18);
660  else if (qy > height + 1)
661  my -= 8 * (qy - height - 1);
662  }
663 
 /* destination offset of this 8x8 block inside the 16x16 MB; field MVs
  * interleave the two vertical halves line-wise */
664  if ((v->fcm == ILACE_FRAME) && fieldmv)
665  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
666  else
667  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
668 
669  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
670  if (!fieldmv)
671  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
672  else
673  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
674 
675  if (v->profile != PROFILE_ADVANCED) {
676  src_x = av_clip(src_x, -16, s->mb_width * 16);
677  src_y = av_clip(src_y, -16, s->mb_height * 16);
678  } else {
679  src_x = av_clip(src_x, -17, s->avctx->coded_width);
680  if (v->fcm == ILACE_FRAME) {
681  if (src_y & 1)
682  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
683  else
684  src_y = av_clip(src_y, -18, s->avctx->coded_height);
685  } else {
686  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
687  }
688  }
689 
690  srcY += src_y * s->linesize + src_x;
691  if (v->field_mode && v->ref_field_type[dir])
692  srcY += s->current_picture_ptr->f->linesize[0];
693 
694  if (fieldmv && !(src_y & 1))
695  v_edge_pos--;
696  if (fieldmv && (src_y & 1) && src_y < 4)
697  src_y--;
 /* slow path: edge emulation and/or pixel rewriting, see vc1_mc_1mv */
698  if (v->rangeredfrm || use_ic
699  || s->h_edge_pos < 13 || v_edge_pos < 23
700  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
701  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
702  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
703  /* check emulate edge stride and offset */
705  s->linesize, s->linesize,
706  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
707  src_x - s->mspel, src_y - (s->mspel << fieldmv),
708  s->h_edge_pos, v_edge_pos);
709  srcY = s->edge_emu_buffer;
710  /* if we deal with range reduction we need to scale source blocks */
711  if (v->rangeredfrm) {
712  int i, j;
713  uint8_t *src;
714 
715  src = srcY;
716  for (j = 0; j < 9 + s->mspel * 2; j++) {
717  for (i = 0; i < 9 + s->mspel * 2; i++)
718  src[i] = ((src[i] - 128) >> 1) + 128;
719  src += s->linesize << fieldmv;
720  }
721  }
722  /* if we deal with intensity compensation we need to scale source blocks */
723  if (use_ic) {
724  int i, j;
725  uint8_t *src;
726 
727  src = srcY;
728  for (j = 0; j < 9 + s->mspel * 2; j++) {
729  int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
730  for (i = 0; i < 9 + s->mspel * 2; i++)
731  src[i] = luty[f][src[i]];
732  src += s->linesize << fieldmv;
733  }
734  }
735  srcY += s->mspel * (1 + (s->linesize << fieldmv));
736  }
737 
738  if (s->mspel) {
739  dxy = ((my & 3) << 2) | (mx & 3);
740  if (avg)
741  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
742  else
743  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
744  } else { // hpel mc - always used for luma
745  dxy = (my & 2) | ((mx & 2) >> 1);
746  if (!v->rnd)
747  s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
748  else
749  s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
750  }
751 }
752 
753 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
754 {
755  int idx, i;
756  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
757 
758  idx = ((a[3] != flag) << 3)
759  | ((a[2] != flag) << 2)
760  | ((a[1] != flag) << 1)
761  | (a[0] != flag);
762  if (!idx) {
763  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
764  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
765  return 4;
766  } else if (count[idx] == 1) {
767  switch (idx) {
768  case 0x1:
769  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
770  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
771  return 3;
772  case 0x2:
773  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
774  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
775  return 3;
776  case 0x4:
777  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
778  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
779  return 3;
780  case 0x8:
781  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
782  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
783  return 3;
784  }
785  } else if (count[idx] == 2) {
786  int t1 = 0, t2 = 0;
787  for (i = 0; i < 3; i++)
788  if (!a[i]) {
789  t1 = i;
790  break;
791  }
792  for (i = t1 + 1; i < 4; i++)
793  if (!a[i]) {
794  t2 = i;
795  break;
796  }
797  *tx = (mvx[t1] + mvx[t2]) / 2;
798  *ty = (mvy[t1] + mvy[t2]) / 2;
799  return 2;
800  } else {
801  return 0;
802  }
803  return -1;
804 }
805 
 /* Chroma motion compensation in 4-MV mode: builds one chroma MV from the
  * four luma MVs (get_chroma_mv) and performs 8x8 U/V MC.
  * NOTE(review): Doxygen dump — orig. line 920 (the head of the first
  * s->vdsp.emulated_edge_mc(...) call) is missing below. */
808 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
809 {
810  MpegEncContext *s = &v->s;
811  H264ChromaContext *h264chroma = &v->h264chroma;
812  uint8_t *srcU, *srcV;
813  int uvmx, uvmy, uvsrc_x, uvsrc_y;
814  int k, tx = 0, ty = 0;
815  int mvx[4], mvy[4], intra[4], mv_f[4];
816  int valid_count;
817  int chroma_ref_type = v->cur_field_type;
818  int v_edge_pos = s->v_edge_pos >> v->field_mode;
819  uint8_t (*lutuv)[256];
820  int use_ic;
821 
822  if (!v->field_mode && !v->s.last_picture.f->data[0])
823  return;
824  if (s->flags & CODEC_FLAG_GRAY)
825  return;
826 
 /* gather the four luma MVs plus intra/field flags */
827  for (k = 0; k < 4; k++) {
828  mvx[k] = s->mv[dir][k][0];
829  mvy[k] = s->mv[dir][k][1];
830  intra[k] = v->mb_type[0][s->block_index[k]];
831  if (v->field_mode)
832  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
833  }
834 
835  /* calculate chroma MV vector from four luma MVs */
836  if (!v->field_mode || (v->field_mode && !v->numref)) {
837  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
838  chroma_ref_type = v->reffield;
839  if (!valid_count) {
840  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
841  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
842  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
843  return; //no need to do MC for intra blocks
844  }
845  } else {
 /* two-reference field picture: use the MVs of the dominant field */
846  int dominant = 0;
847  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
848  dominant = 1;
849  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
850  if (dominant)
851  chroma_ref_type = !v->cur_field_type;
852  }
853  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
854  return;
855  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
856  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
 /* halve the luma-resolution MV, rounding odd 1/4-pel values up */
857  uvmx = (tx + ((tx & 3) == 3)) >> 1;
858  uvmy = (ty + ((ty & 3) == 3)) >> 1;
859 
860  v->luma_mv[s->mb_x][0] = uvmx;
861  v->luma_mv[s->mb_x][1] = uvmy;
862 
863  if (v->fastuvmc) {
864  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
865  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
866  }
867  // Field conversion bias
868  if (v->cur_field_type != chroma_ref_type)
869  uvmy += 2 - 4 * chroma_ref_type;
870 
871  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
872  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
873 
874  if (v->profile != PROFILE_ADVANCED) {
875  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
876  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
877  } else {
878  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
879  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
880  }
881 
 /* select the reference chroma planes + IC LUT, as in vc1_mc_1mv */
882  if (!dir) {
883  if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
884  srcU = s->current_picture.f->data[1];
885  srcV = s->current_picture.f->data[2];
886  lutuv = v->curr_lutuv;
887  use_ic = v->curr_use_ic;
888  } else {
889  srcU = s->last_picture.f->data[1];
890  srcV = s->last_picture.f->data[2];
891  lutuv = v->last_lutuv;
892  use_ic = v->last_use_ic;
893  }
894  } else {
895  srcU = s->next_picture.f->data[1];
896  srcV = s->next_picture.f->data[2];
897  lutuv = v->next_lutuv;
898  use_ic = v->next_use_ic;
899  }
900 
901  if (!srcU) {
902  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
903  return;
904  }
905 
906  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
907  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
908 
 /* bottom-field reference starts one line into the frame buffer */
909  if (v->field_mode) {
910  if (chroma_ref_type) {
911  srcU += s->current_picture_ptr->f->linesize[1];
912  srcV += s->current_picture_ptr->f->linesize[2];
913  }
914  }
915 
 /* slow path: edge emulation and/or pixel rewriting */
916  if (v->rangeredfrm || use_ic
917  || s->h_edge_pos < 18 || v_edge_pos < 18
918  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
919  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
921  s->uvlinesize, s->uvlinesize,
922  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
923  s->h_edge_pos >> 1, v_edge_pos >> 1);
924  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
925  s->uvlinesize, s->uvlinesize,
926  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
927  s->h_edge_pos >> 1, v_edge_pos >> 1);
928  srcU = s->edge_emu_buffer;
929  srcV = s->edge_emu_buffer + 16;
930 
931  /* if we deal with range reduction we need to scale source blocks */
932  if (v->rangeredfrm) {
933  int i, j;
934  uint8_t *src, *src2;
935 
936  src = srcU;
937  src2 = srcV;
938  for (j = 0; j < 9; j++) {
939  for (i = 0; i < 9; i++) {
940  src[i] = ((src[i] - 128) >> 1) + 128;
941  src2[i] = ((src2[i] - 128) >> 1) + 128;
942  }
943  src += s->uvlinesize;
944  src2 += s->uvlinesize;
945  }
946  }
947  /* if we deal with intensity compensation we need to scale source blocks */
948  if (use_ic) {
949  int i, j;
950  uint8_t *src, *src2;
951 
952  src = srcU;
953  src2 = srcV;
954  for (j = 0; j < 9; j++) {
955  int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
956  for (i = 0; i < 9; i++) {
957  src[i] = lutuv[f][src[i]];
958  src2[i] = lutuv[f][src2[i]];
959  }
960  src += s->uvlinesize;
961  src2 += s->uvlinesize;
962  }
963  }
964  }
965 
966  /* Chroma MC always uses qpel bilinear */
967  uvmx = (uvmx & 3) << 1;
968  uvmy = (uvmy & 3) << 1;
969  if (!v->rnd) {
970  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
971  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
972  } else {
973  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
974  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
975  }
976 }
977 
980 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
981 {
982  MpegEncContext *s = &v->s;
983  H264ChromaContext *h264chroma = &v->h264chroma;
984  uint8_t *srcU, *srcV;
985  int uvsrc_x, uvsrc_y;
986  int uvmx_field[4], uvmy_field[4];
987  int i, off, tx, ty;
988  int fieldmv = v->blk_mv_type[s->block_index[0]];
989  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
990  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
991  int v_edge_pos = s->v_edge_pos >> 1;
992  int use_ic;
993  uint8_t (*lutuv)[256];
994 
995  if (s->flags & CODEC_FLAG_GRAY)
996  return;
997 
998  if (!s->last_picture.f->data[1]) {
999  av_log(s->avctx, AV_LOG_ERROR, "Bad data in last picture frame.\n");
1000  return;
1001  }
1002 
1003  for (i = 0; i < 4; i++) {
1004  int d = i < 2 ? dir: dir2;
1005  tx = s->mv[d][i][0];
1006  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1007  ty = s->mv[d][i][1];
1008  if (fieldmv)
1009  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1010  else
1011  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1012  }
1013 
1014  for (i = 0; i < 4; i++) {
1015  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1016  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1017  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1018  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1019  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1020  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1021  if (i < 2 ? dir : dir2) {
1022  srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1023  srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1024  lutuv = v->next_lutuv;
1025  use_ic = v->next_use_ic;
1026  } else {
1027  srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1028  srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1029  lutuv = v->last_lutuv;
1030  use_ic = v->last_use_ic;
1031  }
1032  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1033  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1034 
1035  if (fieldmv && !(uvsrc_y & 1))
1036  v_edge_pos--;
1037  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1038  uvsrc_y--;
1039  if (use_ic
1040  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1041  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1042  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1044  s->uvlinesize, s->uvlinesize,
1045  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1046  s->h_edge_pos >> 1, v_edge_pos);
1047  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1048  s->uvlinesize, s->uvlinesize,
1049  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1050  s->h_edge_pos >> 1, v_edge_pos);
1051  srcU = s->edge_emu_buffer;
1052  srcV = s->edge_emu_buffer + 16;
1053 
1054  /* if we deal with intensity compensation we need to scale source blocks */
1055  if (use_ic) {
1056  int i, j;
1057  uint8_t *src, *src2;
1058 
1059  src = srcU;
1060  src2 = srcV;
1061  for (j = 0; j < 5; j++) {
1062  int f = (uvsrc_y + (j << fieldmv)) & 1;
1063  for (i = 0; i < 5; i++) {
1064  src[i] = lutuv[f][src[i]];
1065  src2[i] = lutuv[f][src2[i]];
1066  }
1067  src += s->uvlinesize << fieldmv;
1068  src2 += s->uvlinesize << fieldmv;
1069  }
1070  }
1071  }
1072  if (avg) {
1073  if (!v->rnd) {
1074  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1076  } else {
1077  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1079  }
1080  } else {
1081  if (!v->rnd) {
1082  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1083  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1084  } else {
1085  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1086  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1087  }
1088  }
1089  }
1090 }
1091 
1092 /***********************************************************************/
/**
 * Get the macroblock-level quantizer scale (MQUANT).
 * Reads from the bitstream only for DQPROFILE_ALL_MBS; for the edge
 * profiles it selects between v->pq and v->altpq based on MB position.
 * Expects `v`, `s`, `gb`, `mquant` and `mqdiff` in the invoking scope.
 */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        /* edge bits: 1 = left, 2 = top, 4 = right, 8 = bottom */ \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
1137 
/**
 * Decode one MVDATA element: sets the MV differentials _dmv_x/_dmv_y and
 * updates s->mb_intra / mb_has_coeffs. Index 35 is the escape (raw MV),
 * index 36 signals an intra MB. Expects `gb`, `index`, `index1`, `val`,
 * `sign`, `size_table`, `offset_table` and `mb_has_coeffs` in scope.
 */
#define GET_MVDATA(_dmv_x, _dmv_y)                                  \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                  \
    if (index > 36) {                                               \
        mb_has_coeffs = 1;                                          \
        index -= 37;                                                \
    } else                                                          \
        mb_has_coeffs = 0;                                          \
    s->mb_intra = 0;                                                \
    if (!index) {                                                   \
        _dmv_x = _dmv_y = 0;                                        \
    } else if (index == 35) {                                       \
        /* escape: MV differentials coded as raw bits */            \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);      \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);      \
    } else if (index == 36) {                                       \
        _dmv_x = 0;                                                 \
        _dmv_y = 0;                                                 \
        s->mb_intra = 1;                                            \
    } else {                                                        \
        index1 = index % 6;                                         \
        if (!s->quarter_sample && index1 == 5) val = 1;             \
        else                                   val = 0;             \
        if (size_table[index1] - val > 0)                           \
            val = get_bits(gb, size_table[index1] - val);           \
        else                                   val = 0;             \
        sign = 0 - (val&1); /* all-ones mask if val is odd */       \
        _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
                                                                    \
        index1 = index / 6;                                         \
        if (!s->quarter_sample && index1 == 5) val = 1;             \
        else                                   val = 0;             \
        if (size_table[index1] - val > 0)                           \
            val = get_bits(gb, size_table[index1] - val);           \
        else                                   val = 0;             \
        sign = 0 - (val & 1);                                       \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
    }
1182 
1184  int *dmv_y, int *pred_flag)
1185 {
1186  int index, index1;
1187  int extend_x = 0, extend_y = 0;
1188  GetBitContext *gb = &v->s.gb;
1189  int bits, esc;
1190  int val, sign;
1191  const int* offs_tab;
1192 
1193  if (v->numref) {
1194  bits = VC1_2REF_MVDATA_VLC_BITS;
1195  esc = 125;
1196  } else {
1197  bits = VC1_1REF_MVDATA_VLC_BITS;
1198  esc = 71;
1199  }
1200  switch (v->dmvrange) {
1201  case 1:
1202  extend_x = 1;
1203  break;
1204  case 2:
1205  extend_y = 1;
1206  break;
1207  case 3:
1208  extend_x = extend_y = 1;
1209  break;
1210  }
1211  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1212  if (index == esc) {
1213  *dmv_x = get_bits(gb, v->k_x);
1214  *dmv_y = get_bits(gb, v->k_y);
1215  if (v->numref) {
1216  if (pred_flag) {
1217  *pred_flag = *dmv_y & 1;
1218  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1219  } else {
1220  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1221  }
1222  }
1223  }
1224  else {
1225  if (extend_x)
1226  offs_tab = offset_table2;
1227  else
1228  offs_tab = offset_table1;
1229  index1 = (index + 1) % 9;
1230  if (index1 != 0) {
1231  val = get_bits(gb, index1 + extend_x);
1232  sign = 0 -(val & 1);
1233  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1234  } else
1235  *dmv_x = 0;
1236  if (extend_y)
1237  offs_tab = offset_table2;
1238  else
1239  offs_tab = offset_table1;
1240  index1 = (index + 1) / 9;
1241  if (index1 > v->numref) {
1242  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1243  sign = 0 - (val & 1);
1244  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1245  } else
1246  *dmv_y = 0;
1247  if (v->numref && pred_flag)
1248  *pred_flag = index1 & 1;
1249  }
1250 }
1251 
1252 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1253 {
1254  int scaledvalue, refdist;
1255  int scalesame1, scalesame2;
1256  int scalezone1_x, zone1offset_x;
1257  int table_index = dir ^ v->second_field;
1258 
1259  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1260  refdist = v->refdist;
1261  else
1262  refdist = dir ? v->brfd : v->frfd;
1263  if (refdist > 3)
1264  refdist = 3;
1265  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1266  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1267  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1268  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1269 
1270  if (FFABS(n) > 255)
1271  scaledvalue = n;
1272  else {
1273  if (FFABS(n) < scalezone1_x)
1274  scaledvalue = (n * scalesame1) >> 8;
1275  else {
1276  if (n < 0)
1277  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1278  else
1279  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1280  }
1281  }
1282  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1283 }
1284 
1285 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1286 {
1287  int scaledvalue, refdist;
1288  int scalesame1, scalesame2;
1289  int scalezone1_y, zone1offset_y;
1290  int table_index = dir ^ v->second_field;
1291 
1292  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1293  refdist = v->refdist;
1294  else
1295  refdist = dir ? v->brfd : v->frfd;
1296  if (refdist > 3)
1297  refdist = 3;
1298  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1299  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1300  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1301  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1302 
1303  if (FFABS(n) > 63)
1304  scaledvalue = n;
1305  else {
1306  if (FFABS(n) < scalezone1_y)
1307  scaledvalue = (n * scalesame1) >> 8;
1308  else {
1309  if (n < 0)
1310  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1311  else
1312  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1313  }
1314  }
1315 
1316  if (v->cur_field_type && !v->ref_field_type[dir])
1317  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1318  else
1319  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1320 }
1321 
1322 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1323 {
1324  int scalezone1_x, zone1offset_x;
1325  int scaleopp1, scaleopp2, brfd;
1326  int scaledvalue;
1327 
1328  brfd = FFMIN(v->brfd, 3);
1329  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1330  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1331  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1332  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1333 
1334  if (FFABS(n) > 255)
1335  scaledvalue = n;
1336  else {
1337  if (FFABS(n) < scalezone1_x)
1338  scaledvalue = (n * scaleopp1) >> 8;
1339  else {
1340  if (n < 0)
1341  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1342  else
1343  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1344  }
1345  }
1346  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1347 }
1348 
1349 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1350 {
1351  int scalezone1_y, zone1offset_y;
1352  int scaleopp1, scaleopp2, brfd;
1353  int scaledvalue;
1354 
1355  brfd = FFMIN(v->brfd, 3);
1356  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1357  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1358  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1359  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1360 
1361  if (FFABS(n) > 63)
1362  scaledvalue = n;
1363  else {
1364  if (FFABS(n) < scalezone1_y)
1365  scaledvalue = (n * scaleopp1) >> 8;
1366  else {
1367  if (n < 0)
1368  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1369  else
1370  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1371  }
1372  }
1373  if (v->cur_field_type && !v->ref_field_type[dir]) {
1374  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1375  } else {
1376  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1377  }
1378 }
1379 
1380 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1381  int dim, int dir)
1382 {
1383  int brfd, scalesame;
1384  int hpel = 1 - v->s.quarter_sample;
1385 
1386  n >>= hpel;
1387  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1388  if (dim)
1389  n = scaleforsame_y(v, i, n, dir) << hpel;
1390  else
1391  n = scaleforsame_x(v, n, dir) << hpel;
1392  return n;
1393  }
1394  brfd = FFMIN(v->brfd, 3);
1395  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1396 
1397  n = (n * scalesame >> 8) << hpel;
1398  return n;
1399 }
1400 
1401 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1402  int dim, int dir)
1403 {
1404  int refdist, scaleopp;
1405  int hpel = 1 - v->s.quarter_sample;
1406 
1407  n >>= hpel;
1408  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1409  if (dim)
1410  n = scaleforopp_y(v, n, dir) << hpel;
1411  else
1412  n = scaleforopp_x(v, n) << hpel;
1413  return n;
1414  }
1415  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1416  refdist = FFMIN(v->refdist, 3);
1417  else
1418  refdist = dir ? v->brfd : v->frfd;
1419  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1420 
1421  n = (n * scaleopp >> 8) << hpel;
1422  return n;
1423 }
1424 
1427 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1428  int mv1, int r_x, int r_y, uint8_t* is_intra,
1429  int pred_flag, int dir)
1430 {
1431  MpegEncContext *s = &v->s;
1432  int xy, wrap, off = 0;
1433  int16_t *A, *B, *C;
1434  int px, py;
1435  int sum;
1436  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1437  int opposite, a_f, b_f, c_f;
1438  int16_t field_predA[2];
1439  int16_t field_predB[2];
1440  int16_t field_predC[2];
1441  int a_valid, b_valid, c_valid;
1442  int hybridmv_thresh, y_bias = 0;
1443 
1444  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1446  mixedmv_pic = 1;
1447  else
1448  mixedmv_pic = 0;
1449  /* scale MV difference to be quad-pel */
1450  dmv_x <<= 1 - s->quarter_sample;
1451  dmv_y <<= 1 - s->quarter_sample;
1452 
1453  wrap = s->b8_stride;
1454  xy = s->block_index[n];
1455 
1456  if (s->mb_intra) {
1457  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1458  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1459  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1460  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1461  if (mv1) { /* duplicate motion data for 1-MV block */
1462  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1463  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1464  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1465  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1466  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1467  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1468  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1469  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1470  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1471  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1472  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1473  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1474  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1475  }
1476  return;
1477  }
1478 
1479  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1480  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1481  if (mv1) {
1482  if (v->field_mode && mixedmv_pic)
1483  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1484  else
1485  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1486  } else {
1487  //in 4-MV mode different blocks have different B predictor position
1488  switch (n) {
1489  case 0:
1490  off = (s->mb_x > 0) ? -1 : 1;
1491  break;
1492  case 1:
1493  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1494  break;
1495  case 2:
1496  off = 1;
1497  break;
1498  case 3:
1499  off = -1;
1500  }
1501  }
1502  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1503 
1504  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1505  b_valid = a_valid && (s->mb_width > 1);
1506  c_valid = s->mb_x || (n == 1 || n == 3);
1507  if (v->field_mode) {
1508  a_valid = a_valid && !is_intra[xy - wrap];
1509  b_valid = b_valid && !is_intra[xy - wrap + off];
1510  c_valid = c_valid && !is_intra[xy - 1];
1511  }
1512 
1513  if (a_valid) {
1514  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1515  num_oppfield += a_f;
1516  num_samefield += 1 - a_f;
1517  field_predA[0] = A[0];
1518  field_predA[1] = A[1];
1519  } else {
1520  field_predA[0] = field_predA[1] = 0;
1521  a_f = 0;
1522  }
1523  if (b_valid) {
1524  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1525  num_oppfield += b_f;
1526  num_samefield += 1 - b_f;
1527  field_predB[0] = B[0];
1528  field_predB[1] = B[1];
1529  } else {
1530  field_predB[0] = field_predB[1] = 0;
1531  b_f = 0;
1532  }
1533  if (c_valid) {
1534  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1535  num_oppfield += c_f;
1536  num_samefield += 1 - c_f;
1537  field_predC[0] = C[0];
1538  field_predC[1] = C[1];
1539  } else {
1540  field_predC[0] = field_predC[1] = 0;
1541  c_f = 0;
1542  }
1543 
1544  if (v->field_mode) {
1545  if (!v->numref)
1546  // REFFIELD determines if the last field or the second-last field is
1547  // to be used as reference
1548  opposite = 1 - v->reffield;
1549  else {
1550  if (num_samefield <= num_oppfield)
1551  opposite = 1 - pred_flag;
1552  else
1553  opposite = pred_flag;
1554  }
1555  } else
1556  opposite = 0;
1557  if (opposite) {
1558  if (a_valid && !a_f) {
1559  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1560  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1561  }
1562  if (b_valid && !b_f) {
1563  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1564  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1565  }
1566  if (c_valid && !c_f) {
1567  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1568  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1569  }
1570  v->mv_f[dir][xy + v->blocks_off] = 1;
1571  v->ref_field_type[dir] = !v->cur_field_type;
1572  } else {
1573  if (a_valid && a_f) {
1574  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1575  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1576  }
1577  if (b_valid && b_f) {
1578  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1579  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1580  }
1581  if (c_valid && c_f) {
1582  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1583  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1584  }
1585  v->mv_f[dir][xy + v->blocks_off] = 0;
1586  v->ref_field_type[dir] = v->cur_field_type;
1587  }
1588 
1589  if (a_valid) {
1590  px = field_predA[0];
1591  py = field_predA[1];
1592  } else if (c_valid) {
1593  px = field_predC[0];
1594  py = field_predC[1];
1595  } else if (b_valid) {
1596  px = field_predB[0];
1597  py = field_predB[1];
1598  } else {
1599  px = 0;
1600  py = 0;
1601  }
1602 
1603  if (num_samefield + num_oppfield > 1) {
1604  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1605  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1606  }
1607 
1608  /* Pullback MV as specified in 8.3.5.3.4 */
1609  if (!v->field_mode) {
1610  int qx, qy, X, Y;
1611  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1612  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1613  X = (s->mb_width << 6) - 4;
1614  Y = (s->mb_height << 6) - 4;
1615  if (mv1) {
1616  if (qx + px < -60) px = -60 - qx;
1617  if (qy + py < -60) py = -60 - qy;
1618  } else {
1619  if (qx + px < -28) px = -28 - qx;
1620  if (qy + py < -28) py = -28 - qy;
1621  }
1622  if (qx + px > X) px = X - qx;
1623  if (qy + py > Y) py = Y - qy;
1624  }
1625 
1626  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1627  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1628  hybridmv_thresh = 32;
1629  if (a_valid && c_valid) {
1630  if (is_intra[xy - wrap])
1631  sum = FFABS(px) + FFABS(py);
1632  else
1633  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1634  if (sum > hybridmv_thresh) {
1635  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1636  px = field_predA[0];
1637  py = field_predA[1];
1638  } else {
1639  px = field_predC[0];
1640  py = field_predC[1];
1641  }
1642  } else {
1643  if (is_intra[xy - 1])
1644  sum = FFABS(px) + FFABS(py);
1645  else
1646  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1647  if (sum > hybridmv_thresh) {
1648  if (get_bits1(&s->gb)) {
1649  px = field_predA[0];
1650  py = field_predA[1];
1651  } else {
1652  px = field_predC[0];
1653  py = field_predC[1];
1654  }
1655  }
1656  }
1657  }
1658  }
1659 
1660  if (v->field_mode && v->numref)
1661  r_y >>= 1;
1662  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1663  y_bias = 1;
1664  /* store MV using signed modulus of MV range defined in 4.11 */
1665  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1666  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1667  if (mv1) { /* duplicate motion data for 1-MV block */
1668  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1669  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1670  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1671  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1672  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1673  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1674  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1675  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1676  }
1677 }
1678 
/**
 * Predict and set the motion vector for an interlaced-frame block/MB.
 * Gathers the A (left), B (above) and C (above-right/left) neighbour
 * predictors, averaging field-MV candidates when the current block uses
 * frame MVs, then selects a prediction by validity count and field parity
 * and stores it with signed-modulus wrapping.
 * @param mvn  1 = 1-MV block (duplicate to all four sub-blocks),
 *             2 = 2-field-MV block (duplicate to the horizontal pair)
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposit
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra MB: zero all stored MVs for both directions */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0]        = 0;
            s->current_picture.motion_val[0][xy + 1][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0]        = 0;
            s->current_picture.motion_val[1][xy + 1][1]        = 0;
            s->current_picture.motion_val[1][xy + wrap][0]     = 0;
            s->current_picture.motion_val[1][xy + wrap][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* +wrap for upper sub-blocks, -wrap for lower ones (field averaging) */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left neighbour MB intra invalidates A for left-column sub-blocks */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj   = n | 2;
                pos_b   = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                /* above block has field MV, current has frame MV: average fields */
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj   = 2;
                    pos_c   = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* rightmost MB column: take C from above-left instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj   = 3;
                            pos_c   = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* lower sub-blocks of a frame-MV MB predict from the upper pair */
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->current_picture.motion_val[dir][pos_b][0];
        B[1]    = s->current_picture.motion_val[dir][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->current_picture.motion_val[dir][pos_c][0];
        C[1]    = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: simple median / single-predictor selection */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: bit 2 of the y component encodes the field parity */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            /* pick the first valid predictor from the majority field set */
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else
                    px = py = 0;
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1       ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1       ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap    ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap    ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
1905 
1908 static void vc1_interp_mc(VC1Context *v)
1909 {
1910  MpegEncContext *s = &v->s;
1911  H264ChromaContext *h264chroma = &v->h264chroma;
1912  uint8_t *srcY, *srcU, *srcV;
1913  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1914  int off, off_uv;
1915  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1916  int use_ic = v->next_use_ic;
1917 
1918  if (!v->field_mode && !v->s.next_picture.f->data[0])
1919  return;
1920 
1921  mx = s->mv[1][0][0];
1922  my = s->mv[1][0][1];
1923  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1924  uvmy = (my + ((my & 3) == 3)) >> 1;
1925  if (v->field_mode) {
1926  if (v->cur_field_type != v->ref_field_type[1])
1927  my = my - 2 + 4 * v->cur_field_type;
1928  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1929  }
1930  if (v->fastuvmc) {
1931  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1932  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1933  }
1934  srcY = s->next_picture.f->data[0];
1935  srcU = s->next_picture.f->data[1];
1936  srcV = s->next_picture.f->data[2];
1937 
1938  src_x = s->mb_x * 16 + (mx >> 2);
1939  src_y = s->mb_y * 16 + (my >> 2);
1940  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1941  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1942 
1943  if (v->profile != PROFILE_ADVANCED) {
1944  src_x = av_clip( src_x, -16, s->mb_width * 16);
1945  src_y = av_clip( src_y, -16, s->mb_height * 16);
1946  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1947  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1948  } else {
1949  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1950  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1951  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1952  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1953  }
1954 
1955  srcY += src_y * s->linesize + src_x;
1956  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1957  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1958 
1959  if (v->field_mode && v->ref_field_type[1]) {
1960  srcY += s->current_picture_ptr->f->linesize[0];
1961  srcU += s->current_picture_ptr->f->linesize[1];
1962  srcV += s->current_picture_ptr->f->linesize[2];
1963  }
1964 
1965  /* for grayscale we should not try to read from unknown area */
1966  if (s->flags & CODEC_FLAG_GRAY) {
1967  srcU = s->edge_emu_buffer + 18 * s->linesize;
1968  srcV = s->edge_emu_buffer + 18 * s->linesize;
1969  }
1970 
1971  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1972  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1973  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1974  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1975 
1976  srcY -= s->mspel * (1 + s->linesize);
1978  s->linesize, s->linesize,
1979  17 + s->mspel * 2, 17 + s->mspel * 2,
1980  src_x - s->mspel, src_y - s->mspel,
1981  s->h_edge_pos, v_edge_pos);
1982  srcY = s->edge_emu_buffer;
1983  s->vdsp.emulated_edge_mc(uvbuf, srcU,
1984  s->uvlinesize, s->uvlinesize,
1985  8 + 1, 8 + 1,
1986  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1987  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1988  s->uvlinesize, s->uvlinesize,
1989  8 + 1, 8 + 1,
1990  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1991  srcU = uvbuf;
1992  srcV = uvbuf + 16;
1993  /* if we deal with range reduction we need to scale source blocks */
1994  if (v->rangeredfrm) {
1995  int i, j;
1996  uint8_t *src, *src2;
1997 
1998  src = srcY;
1999  for (j = 0; j < 17 + s->mspel * 2; j++) {
2000  for (i = 0; i < 17 + s->mspel * 2; i++)
2001  src[i] = ((src[i] - 128) >> 1) + 128;
2002  src += s->linesize;
2003  }
2004  src = srcU;
2005  src2 = srcV;
2006  for (j = 0; j < 9; j++) {
2007  for (i = 0; i < 9; i++) {
2008  src[i] = ((src[i] - 128) >> 1) + 128;
2009  src2[i] = ((src2[i] - 128) >> 1) + 128;
2010  }
2011  src += s->uvlinesize;
2012  src2 += s->uvlinesize;
2013  }
2014  }
2015 
2016  if (use_ic) {
2017  uint8_t (*luty )[256] = v->next_luty;
2018  uint8_t (*lutuv)[256] = v->next_lutuv;
2019  int i, j;
2020  uint8_t *src, *src2;
2021 
2022  src = srcY;
2023  for (j = 0; j < 17 + s->mspel * 2; j++) {
2024  int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2025  for (i = 0; i < 17 + s->mspel * 2; i++)
2026  src[i] = luty[f][src[i]];
2027  src += s->linesize;
2028  }
2029  src = srcU;
2030  src2 = srcV;
2031  for (j = 0; j < 9; j++) {
2032  int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2033  for (i = 0; i < 9; i++) {
2034  src[i] = lutuv[f][src[i]];
2035  src2[i] = lutuv[f][src2[i]];
2036  }
2037  src += s->uvlinesize;
2038  src2 += s->uvlinesize;
2039  }
2040  }
2041  srcY += s->mspel * (1 + s->linesize);
2042  }
2043 
2044  off = 0;
2045  off_uv = 0;
2046 
2047  if (s->mspel) {
2048  dxy = ((my & 3) << 2) | (mx & 3);
2049  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2050  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2051  srcY += s->linesize * 8;
2052  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2053  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2054  } else { // hpel mc
2055  dxy = (my & 2) | ((mx & 2) >> 1);
2056 
2057  if (!v->rnd)
2058  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2059  else
2060  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2061  }
2062 
2063  if (s->flags & CODEC_FLAG_GRAY) return;
2064  /* Chroma MC always uses qpel blilinear */
2065  uvmx = (uvmx & 3) << 1;
2066  uvmy = (uvmy & 3) << 1;
2067  if (!v->rnd) {
2068  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2069  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2070  } else {
2071  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2072  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2073  }
2074 }
2075 
2076 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2077 {
2078  int n = bfrac;
2079 
2080 #if B_FRACTION_DEN==256
2081  if (inv)
2082  n -= 256;
2083  if (!qs)
2084  return 2 * ((value * n + 255) >> 9);
2085  return (value * n + 128) >> 8;
2086 #else
2087  if (inv)
2088  n -= B_FRACTION_DEN;
2089  if (!qs)
2090  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2091  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2092 #endif
2093 }
2094 
2097 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2098  int direct, int mode)
2099 {
2100  if (direct) {
2101  vc1_mc_1mv(v, 0);
2102  vc1_interp_mc(v);
2103  return;
2104  }
2105  if (mode == BMV_TYPE_INTERPOLATED) {
2106  vc1_mc_1mv(v, 0);
2107  vc1_interp_mc(v);
2108  return;
2109  }
2110 
2111  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2112 }
2113 
/**
 * Predict the forward and/or backward motion vector of a B-frame
 * macroblock (progressive path) and store the result in s->mv[] and the
 * current picture's motion_val arrays.
 *
 * Spatial predictors: A = above, C = left, B = above-right (above-left at
 * the right picture edge).  Direct MBs reuse the co-located next-picture
 * MV scaled by BFRACTION.  dmv_x/dmv_y index [0] = forward, [1] = backward.
 */
2114 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2115  int direct, int mvtype)
2116 {
2117  MpegEncContext *s = &v->s;
2118  int xy, wrap, off = 0;
2119  int16_t *A, *B, *C;
2120  int px, py;
2121  int sum;
2122  int r_x, r_y;
2123  const uint8_t *is_intra = v->mb_type[0];
2124 
2125  r_x = v->range_x;
2126  r_y = v->range_y;
2127  /* scale MV difference to be quad-pel */
2128  dmv_x[0] <<= 1 - s->quarter_sample;
2129  dmv_y[0] <<= 1 - s->quarter_sample;
2130  dmv_x[1] <<= 1 - s->quarter_sample;
2131  dmv_y[1] <<= 1 - s->quarter_sample;
2132 
2133  wrap = s->b8_stride;
2134  xy = s->block_index[0];
2135 
 /* Intra MB: zero both direction MVs and stop. */
2136  if (s->mb_intra) {
2137  s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2138  s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2139  s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2140  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
2141  return;
2142  }
 /* Direct-mode MV pair: scale the co-located next-picture MV by
  * BFRACTION, forward (inv=0) and backward (inv=1). */
2143  if (!v->field_mode) {
2144  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2145  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2146  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2147  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2148 
2149  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2150  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2151  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2152  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2153  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2154  }
2155  if (direct) {
2156  s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2157  s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2158  s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2159  s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2160  return;
2161  }
2162 
 /* ---- forward MV prediction (also done for interpolated MBs) ---- */
2163  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2164  C = s->current_picture.motion_val[0][xy - 2];
2165  A = s->current_picture.motion_val[0][xy - wrap * 2];
2166  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2167  B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2168 
2169  if (!s->mb_x) C[0] = C[1] = 0;
2170  if (!s->first_slice_line) { // predictor A is not out of bounds
2171  if (s->mb_width == 1) {
2172  px = A[0];
2173  py = A[1];
2174  } else {
2175  px = mid_pred(A[0], B[0], C[0]);
2176  py = mid_pred(A[1], B[1], C[1]);
2177  }
2178  } else if (s->mb_x) { // predictor C is not out of bounds
2179  px = C[0];
2180  py = C[1];
2181  } else {
2182  px = py = 0;
2183  }
2184  /* Pullback MV as specified in 8.3.5.3.4 */
2185  {
2186  int qx, qy, X, Y;
 /* simple profile uses half-pel units (<< 5), advanced quarter-pel (<< 6) */
2187  if (v->profile < PROFILE_ADVANCED) {
2188  qx = (s->mb_x << 5);
2189  qy = (s->mb_y << 5);
2190  X = (s->mb_width << 5) - 4;
2191  Y = (s->mb_height << 5) - 4;
2192  if (qx + px < -28) px = -28 - qx;
2193  if (qy + py < -28) py = -28 - qy;
2194  if (qx + px > X) px = X - qx;
2195  if (qy + py > Y) py = Y - qy;
2196  } else {
2197  qx = (s->mb_x << 6);
2198  qy = (s->mb_y << 6);
2199  X = (s->mb_width << 6) - 4;
2200  Y = (s->mb_height << 6) - 4;
2201  if (qx + px < -60) px = -60 - qx;
2202  if (qy + py < -60) py = -60 - qy;
2203  if (qx + px > X) px = X - qx;
2204  if (qy + py > Y) py = Y - qy;
2205  }
2206  }
2207  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
 /* NOTE: the "0 &&" makes this hybrid-prediction branch deliberately
  * dead code in this decoder. */
2208  if (0 && !s->first_slice_line && s->mb_x) {
2209  if (is_intra[xy - wrap])
2210  sum = FFABS(px) + FFABS(py);
2211  else
2212  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2213  if (sum > 32) {
2214  if (get_bits1(&s->gb)) {
2215  px = A[0];
2216  py = A[1];
2217  } else {
2218  px = C[0];
2219  py = C[1];
2220  }
2221  } else {
2222  if (is_intra[xy - 2])
2223  sum = FFABS(px) + FFABS(py);
2224  else
2225  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2226  if (sum > 32) {
2227  if (get_bits1(&s->gb)) {
2228  px = A[0];
2229  py = A[1];
2230  } else {
2231  px = C[0];
2232  py = C[1];
2233  }
2234  }
2235  }
2236  }
2237  /* store MV using signed modulus of MV range defined in 4.11 */
2238  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2239  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2240  }
 /* ---- backward MV prediction (mirror of the forward section) ---- */
2241  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2242  C = s->current_picture.motion_val[1][xy - 2];
2243  A = s->current_picture.motion_val[1][xy - wrap * 2];
2244  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2245  B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2246 
2247  if (!s->mb_x)
2248  C[0] = C[1] = 0;
2249  if (!s->first_slice_line) { // predictor A is not out of bounds
2250  if (s->mb_width == 1) {
2251  px = A[0];
2252  py = A[1];
2253  } else {
2254  px = mid_pred(A[0], B[0], C[0]);
2255  py = mid_pred(A[1], B[1], C[1]);
2256  }
2257  } else if (s->mb_x) { // predictor C is not out of bounds
2258  px = C[0];
2259  py = C[1];
2260  } else {
2261  px = py = 0;
2262  }
2263  /* Pullback MV as specified in 8.3.5.3.4 */
2264  {
2265  int qx, qy, X, Y;
2266  if (v->profile < PROFILE_ADVANCED) {
2267  qx = (s->mb_x << 5);
2268  qy = (s->mb_y << 5);
2269  X = (s->mb_width << 5) - 4;
2270  Y = (s->mb_height << 5) - 4;
2271  if (qx + px < -28) px = -28 - qx;
2272  if (qy + py < -28) py = -28 - qy;
2273  if (qx + px > X) px = X - qx;
2274  if (qy + py > Y) py = Y - qy;
2275  } else {
2276  qx = (s->mb_x << 6);
2277  qy = (s->mb_y << 6);
2278  X = (s->mb_width << 6) - 4;
2279  Y = (s->mb_height << 6) - 4;
2280  if (qx + px < -60) px = -60 - qx;
2281  if (qy + py < -60) py = -60 - qy;
2282  if (qx + px > X) px = X - qx;
2283  if (qy + py > Y) py = Y - qy;
2284  }
2285  }
2286  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
 /* same deliberately-disabled hybrid prediction as above */
2287  if (0 && !s->first_slice_line && s->mb_x) {
2288  if (is_intra[xy - wrap])
2289  sum = FFABS(px) + FFABS(py);
2290  else
2291  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2292  if (sum > 32) {
2293  if (get_bits1(&s->gb)) {
2294  px = A[0];
2295  py = A[1];
2296  } else {
2297  px = C[0];
2298  py = C[1];
2299  }
2300  } else {
2301  if (is_intra[xy - 2])
2302  sum = FFABS(px) + FFABS(py);
2303  else
2304  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2305  if (sum > 32) {
2306  if (get_bits1(&s->gb)) {
2307  px = A[0];
2308  py = A[1];
2309  } else {
2310  px = C[0];
2311  py = C[1];
2312  }
2313  }
2314  }
2315  }
2316  /* store MV using signed modulus of MV range defined in 4.11 */
2317 
2318  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2319  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2320  }
 /* publish both direction MVs for the current block */
2321  s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2322  s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2323  s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2324  s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2325 }
2326 
/**
 * Predict B-frame motion vectors for interlaced-field pictures.
 *
 * Direct MBs derive both direction MVs by BFRACTION-scaling the co-located
 * next-picture MV and pick the reference field by majority vote of the
 * co-located field flags; other modes delegate to vc1_pred_mv() per
 * direction (dir taken from v->bmvtype).
 */
2327 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2328 {
2329  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2330  MpegEncContext *s = &v->s;
2331  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2332 
2333  if (v->bmvtype == BMV_TYPE_DIRECT) {
2334  int total_opp, k, f;
 /* co-located anchor MB is inter: scale its MV by BFRACTION */
2335  if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2336  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2337  v->bfraction, 0, s->quarter_sample);
2338  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2339  v->bfraction, 0, s->quarter_sample);
2340  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2341  v->bfraction, 1, s->quarter_sample);
2342  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2343  v->bfraction, 1, s->quarter_sample);
2344 
 /* majority vote over the four 8x8 blocks' opposite-field flags */
2345  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2346  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2347  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2348  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2349  f = (total_opp > 2) ? 1 : 0;
2350  } else {
 /* co-located MB is intra: zero MVs, same-field reference */
2351  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2352  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2353  f = 0;
2354  }
2355  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
 /* replicate the MV pair and field flag into all four 8x8 blocks */
2356  for (k = 0; k < 4; k++) {
2357  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2358  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2359  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2360  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2361  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2362  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2363  }
2364  return;
2365  }
 /* interpolated: predict both directions as 1-MV */
2366  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2367  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2368  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2369  return;
2370  }
2371  if (dir) { // backward
2372  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2373  if (n == 3 || mv1) {
 /* also refresh the unused (forward) direction's predictor */
2374  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2375  }
2376  } else { // forward
2377  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2378  if (n == 3 || mv1) {
 /* also refresh the unused (backward) direction's predictor */
2379  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2380  }
2381  }
2382 }
2383 
2393 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2394  int16_t **dc_val_ptr, int *dir_ptr)
2395 {
2396  int a, b, c, wrap, pred, scale;
2397  int16_t *dc_val;
2398  static const uint16_t dcpred[32] = {
2399  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2400  114, 102, 93, 85, 79, 73, 68, 64,
2401  60, 57, 54, 51, 49, 47, 45, 43,
2402  41, 39, 38, 37, 35, 34, 33
2403  };
2404 
2405  /* find prediction - wmv3_dc_scale always used here in fact */
2406  if (n < 4) scale = s->y_dc_scale;
2407  else scale = s->c_dc_scale;
2408 
2409  wrap = s->block_wrap[n];
2410  dc_val = s->dc_val[0] + s->block_index[n];
2411 
2412  /* B A
2413  * C X
2414  */
2415  c = dc_val[ - 1];
2416  b = dc_val[ - 1 - wrap];
2417  a = dc_val[ - wrap];
2418 
2419  if (pq < 9 || !overlap) {
2420  /* Set outer values */
2421  if (s->first_slice_line && (n != 2 && n != 3))
2422  b = a = dcpred[scale];
2423  if (s->mb_x == 0 && (n != 1 && n != 3))
2424  b = c = dcpred[scale];
2425  } else {
2426  /* Set outer values */
2427  if (s->first_slice_line && (n != 2 && n != 3))
2428  b = a = 0;
2429  if (s->mb_x == 0 && (n != 1 && n != 3))
2430  b = c = 0;
2431  }
2432 
2433  if (abs(a - b) <= abs(b - c)) {
2434  pred = c;
2435  *dir_ptr = 1; // left
2436  } else {
2437  pred = a;
2438  *dir_ptr = 0; // top
2439  }
2440 
2441  /* update predictor */
2442  *dc_val_ptr = &dc_val[0];
2443  return pred;
2444 }
2445 
2446 
/**
 * Predict the DC coefficient of a block in advanced/P coding, rescaling
 * each neighbouring DC value when its macroblock used a different
 * quantizer than the current one.
 *
 * @param a_avail    nonzero if the top neighbour is available
 * @param c_avail    nonzero if the left neighbour is available
 * @param dc_val_ptr receives the address where the caller stores the new DC
 * @param dir_ptr    receives the prediction direction (0 = top, 1 = left)
 * @return predicted DC value (0 when the DC scale table yields no divisor)
 */
2458 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2459  int a_avail, int c_avail,
2460  int16_t **dc_val_ptr, int *dir_ptr)
2461 {
2462  int a, b, c, wrap, pred;
2463  int16_t *dc_val;
2464  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2465  int q1, q2 = 0;
2466  int dqscale_index;
2467 
2468  wrap = s->block_wrap[n];
2469  dc_val = s->dc_val[0] + s->block_index[n];
2470 
2471  /* B A
2472  * C X
2473  */
2474  c = dc_val[ - 1];
2475  b = dc_val[ - 1 - wrap];
2476  a = dc_val[ - wrap];
2477  /* scale predictors if needed */
2478  q1 = s->current_picture.qscale_table[mb_pos];
2479  dqscale_index = s->y_dc_scale_table[q1] - 1;
 /* degenerate DC scale: no valid divisor, predict 0 */
2480  if (dqscale_index < 0)
2481  return 0;
 /* each neighbour DC is rescaled from its own MB quantizer (q2) to the
  * current one via the fixed-point ff_vc1_dqscale reciprocal table */
2482  if (c_avail && (n != 1 && n != 3)) {
2483  q2 = s->current_picture.qscale_table[mb_pos - 1];
2484  if (q2 && q2 != q1)
2485  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2486  }
2487  if (a_avail && (n != 2 && n != 3)) {
2488  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2489  if (q2 && q2 != q1)
2490  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2491  }
2492  if (a_avail && c_avail && (n != 3)) {
 /* locate the MB holding the top-left (B) neighbour block */
2493  int off = mb_pos;
2494  if (n != 1)
2495  off--;
2496  if (n != 2)
2497  off -= s->mb_stride;
2498  q2 = s->current_picture.qscale_table[off];
2499  if (q2 && q2 != q1)
2500  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2501  }
2502 
 /* choose direction by gradient when both neighbours exist, else take
  * whichever is available (defaulting to 0 / left) */
2503  if (a_avail && c_avail) {
2504  if (abs(a - b) <= abs(b - c)) {
2505  pred = c;
2506  *dir_ptr = 1; // left
2507  } else {
2508  pred = a;
2509  *dir_ptr = 0; // top
2510  }
2511  } else if (a_avail) {
2512  pred = a;
2513  *dir_ptr = 0; // top
2514  } else if (c_avail) {
2515  pred = c;
2516  *dir_ptr = 1; // left
2517  } else {
2518  pred = 0;
2519  *dir_ptr = 1; // left
2520  }
2521 
2522  /* update predictor */
2523  *dc_val_ptr = &dc_val[0];
2524  return pred;
2525 }
2526  // Block group
2528 
2535 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2536  uint8_t **coded_block_ptr)
2537 {
2538  int xy, wrap, pred, a, b, c;
2539 
2540  xy = s->block_index[n];
2541  wrap = s->b8_stride;
2542 
2543  /* B C
2544  * A X
2545  */
2546  a = s->coded_block[xy - 1 ];
2547  b = s->coded_block[xy - 1 - wrap];
2548  c = s->coded_block[xy - wrap];
2549 
2550  if (b == c) {
2551  pred = a;
2552  } else {
2553  pred = c;
2554  }
2555 
2556  /* store value */
2557  *coded_block_ptr = &s->coded_block[xy];
2558 
2559  return pred;
2560 }
2561 
/**
 * Decode one AC coefficient as a (last, run, level) triple.
 *
 * Handles the regular VLC path plus the three escape modes signalled by
 * decode210(): escape 0 adds a delta level, escape 1 adds a delta run,
 * escape 2 reads run/level as fixed-length fields whose widths are set up
 * on first use (tables 59/60 of the spec).
 *
 * @param last      receives nonzero when this is the last coeff of the block
 * @param skip      receives the zero-run before the coefficient
 * @param value     receives the signed coefficient level
 * @param codingset selects the AC coding set (tables) to use
 */
2571 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2572  int *value, int codingset)
2573 {
2574  GetBitContext *gb = &v->s.gb;
2575  int index, escape, run = 0, level = 0, lst = 0;
2576 
2577  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2578  if (index != ff_vc1_ac_sizes[codingset] - 1) {
2579  run = vc1_index_decode_table[codingset][index][0];
2580  level = vc1_index_decode_table[codingset][index][1];
 /* force "last" on bitstream overread so the caller's loop terminates */
2581  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2582  if (get_bits1(gb))
2583  level = -level;
2584  } else {
 /* escape symbol: mode 0/1/2 selected by a unary-ish code */
2585  escape = decode210(gb);
2586  if (escape != 2) {
2587  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2588  run = vc1_index_decode_table[codingset][index][0];
2589  level = vc1_index_decode_table[codingset][index][1];
2590  lst = index >= vc1_last_decode_table[codingset];
2591  if (escape == 0) {
 /* mode 0: extend the level by a table-driven delta */
2592  if (lst)
2593  level += vc1_last_delta_level_table[codingset][run];
2594  else
2595  level += vc1_delta_level_table[codingset][run];
2596  } else {
 /* mode 1: extend the run by a table-driven delta */
2597  if (lst)
2598  run += vc1_last_delta_run_table[codingset][level] + 1;
2599  else
2600  run += vc1_delta_run_table[codingset][level] + 1;
2601  }
2602  if (get_bits1(gb))
2603  level = -level;
2604  } else {
 /* mode 2: fixed-length run/level; field widths are decoded once
  * per slice and cached in esc3_{run,level}_length */
2605  int sign;
2606  lst = get_bits1(gb);
2607  if (v->s.esc3_level_length == 0) {
2608  if (v->pq < 8 || v->dquantfrm) { // table 59
2609  v->s.esc3_level_length = get_bits(gb, 3);
2610  if (!v->s.esc3_level_length)
2611  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2612  } else { // table 60
2613  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2614  }
2615  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2616  }
2617  run = get_bits(gb, v->s.esc3_run_length);
2618  sign = get_bits1(gb);
2619  level = get_bits(gb, v->s.esc3_level_length);
2620  if (sign)
2621  level = -level;
2622  }
2623  }
2624 
2625  *last = lst;
2626  *skip = run;
2627  *value = level;
2628 }
2629 
/**
 * Decode one intra block of a simple/main-profile I-frame: DC differential
 * with prediction, then AC coefficients with optional AC prediction from
 * the left or top neighbour, then coefficient scaling.
 *
 * @param block     output coefficient array (zigzag-reordered in place)
 * @param n         block index in the MB (0-3 luma, 4-5 chroma)
 * @param coded     nonzero if AC coefficients are present in the bitstream
 * @param codingset AC coding set to use
 * @return 0 on success, -1 on an illegal DC VLC
 */
2637 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2638  int coded, int codingset)
2639 {
2640  GetBitContext *gb = &v->s.gb;
2641  MpegEncContext *s = &v->s;
2642  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2643  int i;
2644  int16_t *dc_val;
2645  int16_t *ac_val, *ac_val2;
2646  int dcdiff;
2647 
2648  /* Get DC differential */
 /* NOTE(review): the DC-differential VLC reads (original lines 2650/2652,
  * presumably "dcdiff = get_vlc2(...)" for the luma/chroma DC tables) were
  * lost when this source was extracted; as shown, dcdiff is read below
  * uninitialized -- restore from the upstream file. */
2649  if (n < 4) {
2651  } else {
2653  }
2654  if (dcdiff < 0) {
2655  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2656  return -1;
2657  }
2658  if (dcdiff) {
 /* escape: DC differential coded as a fixed-length field whose width
  * depends on the picture quantizer */
2659  if (dcdiff == 119 /* ESC index value */) {
2660  /* TODO: Optimize */
2661  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2662  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2663  else dcdiff = get_bits(gb, 8);
2664  } else {
 /* low quantizers refine the VLC value with extra bits */
2665  if (v->pq == 1)
2666  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2667  else if (v->pq == 2)
2668  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2669  }
2670  if (get_bits1(gb))
2671  dcdiff = -dcdiff;
2672  }
2673 
2674  /* Prediction */
2675  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2676  *dc_val = dcdiff;
2677 
2678  /* Store the quantized DC coeff, used for prediction */
2679  if (n < 4) {
2680  block[0] = dcdiff * s->y_dc_scale;
2681  } else {
2682  block[0] = dcdiff * s->c_dc_scale;
2683  }
2684  /* Skip ? */
2685  if (!coded) {
2686  goto not_coded;
2687  }
2688 
2689  // AC Decoding
2690  i = 1;
2691 
2692  {
2693  int last = 0, skip, value;
2694  const uint8_t *zz_table;
2695  int scale;
2696  int k;
2697 
2698  scale = v->pq * 2 + v->halfpq;
2699 
 /* zigzag pattern depends on the AC prediction direction */
2700  if (v->s.ac_pred) {
2701  if (!dc_pred_dir)
2702  zz_table = v->zz_8x8[2];
2703  else
2704  zz_table = v->zz_8x8[3];
2705  } else
2706  zz_table = v->zz_8x8[1];
2707 
2708  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2709  ac_val2 = ac_val;
 /* point ac_val at the neighbour we predict from */
2710  if (dc_pred_dir) // left
2711  ac_val -= 16;
2712  else // top
2713  ac_val -= 16 * s->block_wrap[n];
2714 
2715  while (!last) {
2716  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2717  i += skip;
2718  if (i > 63)
2719  break;
2720  block[zz_table[i++]] = value;
2721  }
2722 
2723  /* apply AC prediction if needed */
2724  if (s->ac_pred) {
2725  if (dc_pred_dir) { // left
2726  for (k = 1; k < 8; k++)
2727  block[k << v->left_blk_sh] += ac_val[k];
2728  } else { // top
2729  for (k = 1; k < 8; k++)
2730  block[k << v->top_blk_sh] += ac_val[k + 8];
2731  }
2732  }
2733  /* save AC coeffs for further prediction */
2734  for (k = 1; k < 8; k++) {
2735  ac_val2[k] = block[k << v->left_blk_sh];
2736  ac_val2[k + 8] = block[k << v->top_blk_sh];
2737  }
2738 
2739  /* scale AC coeffs */
2740  for (k = 1; k < 64; k++)
2741  if (block[k]) {
2742  block[k] *= scale;
 /* non-uniform quantizer adds a dead-zone offset */
2743  if (!v->pquantizer)
2744  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2745  }
2746 
2747  if (s->ac_pred) i = 63;
2748  }
2749 
 /* no coded AC coefficients: the block may still inherit predicted ACs */
2750 not_coded:
2751  if (!coded) {
2752  int k, scale;
2753  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2754  ac_val2 = ac_val;
2755 
2756  i = 0;
2757  scale = v->pq * 2 + v->halfpq;
2758  memset(ac_val2, 0, 16 * 2);
2759  if (dc_pred_dir) { // left
2760  ac_val -= 16;
2761  if (s->ac_pred)
2762  memcpy(ac_val2, ac_val, 8 * 2);
2763  } else { // top
2764  ac_val -= 16 * s->block_wrap[n];
2765  if (s->ac_pred)
2766  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2767  }
2768 
2769  /* apply AC prediction if needed */
2770  if (s->ac_pred) {
2771  if (dc_pred_dir) { //left
2772  for (k = 1; k < 8; k++) {
2773  block[k << v->left_blk_sh] = ac_val[k] * scale;
2774  if (!v->pquantizer && block[k << v->left_blk_sh])
2775  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2776  }
2777  } else { // top
2778  for (k = 1; k < 8; k++) {
2779  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2780  if (!v->pquantizer && block[k << v->top_blk_sh])
2781  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2782  }
2783  }
2784  i = 63;
2785  }
2786  }
2787  s->block_last_index[n] = i;
2788 
2789  return 0;
2790 }
2791 
/**
 * Decode one intra block of an advanced-profile I-frame.  Like
 * vc1_decode_i_block() but uses the per-MB quantizer (mquant), supports
 * interlaced-frame zigzag patterns, and rescales predicted AC/DC values
 * when the neighbour MB used a different quantizer (q2 vs q1).
 *
 * @param block     output coefficient array
 * @param n         block index in the MB (0-3 luma, 4-5 chroma)
 * @param coded     nonzero if AC coefficients are present
 * @param codingset AC coding set to use
 * @param mquant    macroblock quantizer
 * @return 0 on success, -1/-AVERROR on error
 */
2800 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2801  int coded, int codingset, int mquant)
2802 {
2803  GetBitContext *gb = &v->s.gb;
2804  MpegEncContext *s = &v->s;
2805  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2806  int i;
2807  int16_t *dc_val;
2808  int16_t *ac_val, *ac_val2;
2809  int dcdiff;
2810  int a_avail = v->a_avail, c_avail = v->c_avail;
2811  int use_pred = s->ac_pred;
2812  int scale;
2813  int q1, q2 = 0;
2814  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2815 
2816  /* Get DC differential */
 /* NOTE(review): the DC-differential VLC reads (original lines 2818/2820,
  * presumably "dcdiff = get_vlc2(...)" for the luma/chroma DC tables) were
  * lost when this source was extracted; as shown, dcdiff is read below
  * uninitialized -- restore from the upstream file. */
2817  if (n < 4) {
2819  } else {
2821  }
2822  if (dcdiff < 0) {
2823  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2824  return -1;
2825  }
2826  if (dcdiff) {
 /* escape: fixed-length field whose width depends on mquant */
2827  if (dcdiff == 119 /* ESC index value */) {
2828  /* TODO: Optimize */
2829  if (mquant == 1) dcdiff = get_bits(gb, 10);
2830  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2831  else dcdiff = get_bits(gb, 8);
2832  } else {
 /* low quantizers refine the VLC value with extra bits */
2833  if (mquant == 1)
2834  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2835  else if (mquant == 2)
2836  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2837  }
2838  if (get_bits1(gb))
2839  dcdiff = -dcdiff;
2840  }
2841 
2842  /* Prediction */
2843  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2844  *dc_val = dcdiff;
2845 
2846  /* Store the quantized DC coeff, used for prediction */
2847  if (n < 4) {
2848  block[0] = dcdiff * s->y_dc_scale;
2849  } else {
2850  block[0] = dcdiff * s->c_dc_scale;
2851  }
2852 
2853  //AC Decoding
2854  i = 1;
2855 
2856  /* check if AC is needed at all */
2857  if (!a_avail && !c_avail)
2858  use_pred = 0;
2859  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2860  ac_val2 = ac_val;
2861 
 /* halfpq applies only when the MB quantizer equals the picture one */
2862  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2863 
 /* point ac_val at the neighbour we predict from */
2864  if (dc_pred_dir) // left
2865  ac_val -= 16;
2866  else // top
2867  ac_val -= 16 * s->block_wrap[n];
2868 
 /* q1 = this MB's quantizer, q2 = predictor MB's quantizer; blocks whose
  * predictor lies inside the same MB take q2 = q1 */
2869  q1 = s->current_picture.qscale_table[mb_pos];
2870  if ( dc_pred_dir && c_avail && mb_pos)
2871  q2 = s->current_picture.qscale_table[mb_pos - 1];
2872  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2873  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2874  if ( dc_pred_dir && n == 1)
2875  q2 = q1;
2876  if (!dc_pred_dir && n == 2)
2877  q2 = q1;
2878  if (n == 3)
2879  q2 = q1;
2880 
2881  if (coded) {
2882  int last = 0, skip, value;
2883  const uint8_t *zz_table;
2884  int k;
2885 
 /* select the zigzag pattern: interlaced frames use their own table */
2886  if (v->s.ac_pred) {
2887  if (!use_pred && v->fcm == ILACE_FRAME) {
2888  zz_table = v->zzi_8x8;
2889  } else {
2890  if (!dc_pred_dir) // top
2891  zz_table = v->zz_8x8[2];
2892  else // left
2893  zz_table = v->zz_8x8[3];
2894  }
2895  } else {
2896  if (v->fcm != ILACE_FRAME)
2897  zz_table = v->zz_8x8[1];
2898  else
2899  zz_table = v->zzi_8x8;
2900  }
2901 
2902  while (!last) {
2903  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2904  i += skip;
2905  if (i > 63)
2906  break;
2907  block[zz_table[i++]] = value;
2908  }
2909 
2910  /* apply AC prediction if needed */
2911  if (use_pred) {
2912  /* scale predictors if needed*/
2913  if (q2 && q1 != q2) {
2914  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2915  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2916 
2917  if (q1 < 1)
2918  return AVERROR_INVALIDDATA;
 /* rescale the neighbour's ACs from q2 to q1 (fixed-point) */
2919  if (dc_pred_dir) { // left
2920  for (k = 1; k < 8; k++)
2921  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2922  } else { // top
2923  for (k = 1; k < 8; k++)
2924  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2925  }
2926  } else {
2927  if (dc_pred_dir) { //left
2928  for (k = 1; k < 8; k++)
2929  block[k << v->left_blk_sh] += ac_val[k];
2930  } else { //top
2931  for (k = 1; k < 8; k++)
2932  block[k << v->top_blk_sh] += ac_val[k + 8];
2933  }
2934  }
2935  }
2936  /* save AC coeffs for further prediction */
2937  for (k = 1; k < 8; k++) {
2938  ac_val2[k ] = block[k << v->left_blk_sh];
2939  ac_val2[k + 8] = block[k << v->top_blk_sh];
2940  }
2941 
2942  /* scale AC coeffs */
2943  for (k = 1; k < 64; k++)
2944  if (block[k]) {
2945  block[k] *= scale;
 /* non-uniform quantizer adds a dead-zone offset */
2946  if (!v->pquantizer)
2947  block[k] += (block[k] < 0) ? -mquant : mquant;
2948  }
2949 
2950  if (use_pred) i = 63;
2951  } else { // no AC coeffs
2952  int k;
2953 
2954  memset(ac_val2, 0, 16 * 2);
2955  if (dc_pred_dir) { // left
2956  if (use_pred) {
2957  memcpy(ac_val2, ac_val, 8 * 2);
 /* rescale inherited ACs when the neighbour used another quantizer */
2958  if (q2 && q1 != q2) {
2959  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2960  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2961  if (q1 < 1)
2962  return AVERROR_INVALIDDATA;
2963  for (k = 1; k < 8; k++)
2964  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2965  }
2966  }
2967  } else { // top
2968  if (use_pred) {
2969  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2970  if (q2 && q1 != q2) {
2971  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2972  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2973  if (q1 < 1)
2974  return AVERROR_INVALIDDATA;
2975  for (k = 1; k < 8; k++)
2976  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2977  }
2978  }
2979  }
2980 
2981  /* apply AC prediction if needed */
2982  if (use_pred) {
2983  if (dc_pred_dir) { // left
2984  for (k = 1; k < 8; k++) {
2985  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2986  if (!v->pquantizer && block[k << v->left_blk_sh])
2987  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2988  }
2989  } else { // top
2990  for (k = 1; k < 8; k++) {
2991  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2992  if (!v->pquantizer && block[k << v->top_blk_sh])
2993  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2994  }
2995  }
2996  i = 63;
2997  }
2998  }
2999  s->block_last_index[n] = i;
3000 
3001  return 0;
3002 }
3003 
 /* Decode an intra block (DC + AC coefficients) for blocks inside a
  * non-intra macroblock.
  * v         - decoder context; n - block index (0..3 luma, 4..5 chroma)
  * coded     - whether AC coefficients are present in the bitstream
  * mquant    - macroblock quantizer; codingset - AC VLC coding set
  * Returns 0 on success, -1 / AVERROR_INVALIDDATA on bitstream errors.
  * NOTE(review): this listing dropped source lines 3039 and 3041 (the DC
  * differential VLC reads for luma/chroma) — see the numbering gap below. */
 3012 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
 3013  int coded, int mquant, int codingset)
 3014 {
 3015  GetBitContext *gb = &v->s.gb;
 3016  MpegEncContext *s = &v->s;
 3017  int dc_pred_dir = 0; /* Direction of the DC prediction used */
 3018  int i;
 3019  int16_t *dc_val;
 3020  int16_t *ac_val, *ac_val2;
 3021  int dcdiff;
 3022  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 3023  int a_avail = v->a_avail, c_avail = v->c_avail;
 3024  int use_pred = s->ac_pred;
 3025  int scale;
 3026  int q1, q2 = 0;
 3027 
 3028  s->bdsp.clear_block(block);
 3029 
 3030  /* XXX: Guard against dumb values of mquant */
 3031  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
 3032 
 3033  /* Set DC scale - y and c use the same */
 3034  s->y_dc_scale = s->y_dc_scale_table[mquant];
 3035  s->c_dc_scale = s->c_dc_scale_table[mquant];
 3036 
 3037  /* Get DC differential */
 /* NOTE(review): lines 3039/3041 (dcdiff = get_vlc2(... DC VLC ...)) are
  * missing from this listing; dcdiff is read here in the full source. */
 3038  if (n < 4) {
 3040  } else {
 3042  }
 3043  if (dcdiff < 0) {
 3044  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
 3045  return -1;
 3046  }
 3047  if (dcdiff) {
 3048  if (dcdiff == 119 /* ESC index value */) {
 3049  /* TODO: Optimize */
 3050  if (mquant == 1) dcdiff = get_bits(gb, 10);
 3051  else if (mquant == 2) dcdiff = get_bits(gb, 9);
 3052  else dcdiff = get_bits(gb, 8);
 3053  } else {
 3054  if (mquant == 1)
 3055  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
 3056  else if (mquant == 2)
 3057  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
 3058  }
 3059  if (get_bits1(gb))
 3060  dcdiff = -dcdiff;
 3061  }
 3062 
 3063  /* Prediction */
 3064  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
 3065  *dc_val = dcdiff;
 3066 
 3067  /* Store the quantized DC coeff, used for prediction */
 3068 
 3069  if (n < 4) {
 3070  block[0] = dcdiff * s->y_dc_scale;
 3071  } else {
 3072  block[0] = dcdiff * s->c_dc_scale;
 3073  }
 3074 
 3075  //AC Decoding
 3076  i = 1;
 3077 
 3078  /* check if AC is needed at all and adjust direction if needed */
 3079  if (!a_avail) dc_pred_dir = 1;
 3080  if (!c_avail) dc_pred_dir = 0;
 3081  if (!a_avail && !c_avail) use_pred = 0;
 3082  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
 3083  ac_val2 = ac_val;
 3084 
 3085  scale = mquant * 2 + v->halfpq;
 3086 
 3087  if (dc_pred_dir) //left
 3088  ac_val -= 16;
 3089  else //top
 3090  ac_val -= 16 * s->block_wrap[n];
 3091 
 /* q1 = quantizer of this MB, q2 = quantizer of the MB holding the
  * predictor block; for blocks whose predictor lies inside the same MB
  * (n == 1/2/3 cases below) both quantizers are identical. */
 3092  q1 = s->current_picture.qscale_table[mb_pos];
 3093  if (dc_pred_dir && c_avail && mb_pos)
 3094  q2 = s->current_picture.qscale_table[mb_pos - 1];
 3095  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
 3096  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
 3097  if ( dc_pred_dir && n == 1)
 3098  q2 = q1;
 3099  if (!dc_pred_dir && n == 2)
 3100  q2 = q1;
 3101  if (n == 3) q2 = q1;
 3102 
 3103  if (coded) {
 3104  int last = 0, skip, value;
 3105  int k;
 3106 
 /* Run-level decode into the zigzag order selected by picture coding mode
  * (progressive vs interlaced) and AC prediction direction. */
 3107  while (!last) {
 3108  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
 3109  i += skip;
 3110  if (i > 63)
 3111  break;
 3112  if (v->fcm == PROGRESSIVE)
 3113  block[v->zz_8x8[0][i++]] = value;
 3114  else {
 3115  if (use_pred && (v->fcm == ILACE_FRAME)) {
 3116  if (!dc_pred_dir) // top
 3117  block[v->zz_8x8[2][i++]] = value;
 3118  else // left
 3119  block[v->zz_8x8[3][i++]] = value;
 3120  } else {
 3121  block[v->zzi_8x8[i++]] = value;
 3122  }
 3123  }
 3124  }
 3125 
 3126  /* apply AC prediction if needed */
 3127  if (use_pred) {
 3128  /* scale predictors if needed*/
 /* Predictor MB used a different quantizer: rescale its AC values by
  * q2/q1 via the fixed-point ff_vc1_dqscale table (18-bit fraction). */
 3129  if (q2 && q1 != q2) {
 3130  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
 3131  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 3132 
 3133  if (q1 < 1)
 3134  return AVERROR_INVALIDDATA;
 3135  if (dc_pred_dir) { // left
 3136  for (k = 1; k < 8; k++)
 3137  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3138  } else { //top
 3139  for (k = 1; k < 8; k++)
 3140  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3141  }
 3142  } else {
 3143  if (dc_pred_dir) { // left
 3144  for (k = 1; k < 8; k++)
 3145  block[k << v->left_blk_sh] += ac_val[k];
 3146  } else { // top
 3147  for (k = 1; k < 8; k++)
 3148  block[k << v->top_blk_sh] += ac_val[k + 8];
 3149  }
 3150  }
 3151  }
 3152  /* save AC coeffs for further prediction */
 3153  for (k = 1; k < 8; k++) {
 3154  ac_val2[k ] = block[k << v->left_blk_sh];
 3155  ac_val2[k + 8] = block[k << v->top_blk_sh];
 3156  }
 3157 
 3158  /* scale AC coeffs */
 3159  for (k = 1; k < 64; k++)
 3160  if (block[k]) {
 3161  block[k] *= scale;
 3162  if (!v->pquantizer)
 3163  block[k] += (block[k] < 0) ? -mquant : mquant;
 3164  }
 3165 
 3166  if (use_pred) i = 63;
 3167  } else { // no AC coeffs
 3168  int k;
 3169 
 3170  memset(ac_val2, 0, 16 * 2);
 3171  if (dc_pred_dir) { // left
 3172  if (use_pred) {
 3173  memcpy(ac_val2, ac_val, 8 * 2);
 3174  if (q2 && q1 != q2) {
 3175  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
 3176  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 3177  if (q1 < 1)
 3178  return AVERROR_INVALIDDATA;
 3179  for (k = 1; k < 8; k++)
 3180  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3181  }
 3182  }
 3183  } else { // top
 3184  if (use_pred) {
 3185  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
 3186  if (q2 && q1 != q2) {
 3187  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
 3188  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 3189  if (q1 < 1)
 3190  return AVERROR_INVALIDDATA;
 3191  for (k = 1; k < 8; k++)
 3192  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
 3193  }
 3194  }
 3195  }
 3196 
 3197  /* apply AC prediction if needed */
 3198  if (use_pred) {
 3199  if (dc_pred_dir) { // left
 3200  for (k = 1; k < 8; k++) {
 3201  block[k << v->left_blk_sh] = ac_val2[k] * scale;
 3202  if (!v->pquantizer && block[k << v->left_blk_sh])
 3203  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
 3204  }
 3205  } else { // top
 3206  for (k = 1; k < 8; k++) {
 3207  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
 3208  if (!v->pquantizer && block[k << v->top_blk_sh])
 3209  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
 3210  }
 3211  }
 3212  i = 63;
 3213  }
 3214  }
 3215  s->block_last_index[n] = i;
 3216 
 3217  return 0;
 3218 }
3219 
 /* Decode and reconstruct one inter (residual) block of a P macroblock.
  * ttmb selects the block transform (8x8/8x4/4x8/4x4); -1 means it must be
  * read from the bitstream. skip_block suppresses reconstruction (e.g. for
  * chroma in grayscale mode); *ttmb_out accumulates the per-block transform
  * types. Returns the 4-bit coded-subblock pattern for the loop filter.
  * NOTE(review): this listing dropped source line 3238 (the TTBLK VLC read
  * inside the ttmb == -1 branch) and line 3304 (the progressive 4x4 zigzag
  * index assignment) — see the numbering gaps below. */
 3222 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
 3223  int mquant, int ttmb, int first_block,
 3224  uint8_t *dst, int linesize, int skip_block,
 3225  int *ttmb_out)
 3226 {
 3227  MpegEncContext *s = &v->s;
 3228  GetBitContext *gb = &s->gb;
 3229  int i, j;
 3230  int subblkpat = 0;
 3231  int scale, off, idx, last, skip, value;
 3232  int ttblk = ttmb & 7;
 3233  int pat = 0;
 3234 
 3235  s->bdsp.clear_block(block);
 3236 
 /* NOTE(review): line 3238 (ttblk read from the TTBLK VLC) is missing
  * from this listing. */
 3237  if (ttmb == -1) {
 3239  }
 3240  if (ttblk == TT_4X4) {
 3241  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
 3242  }
 3243  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
 3244  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
 3245  || (!v->res_rtm_flag && !first_block))) {
 3246  subblkpat = decode012(gb);
 3247  if (subblkpat)
 3248  subblkpat ^= 3; // swap decoded pattern bits
 3249  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
 3250  ttblk = TT_8X4;
 3251  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
 3252  ttblk = TT_4X8;
 3253  }
 3254  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
 3255 
 3256  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
 3257  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
 3258  subblkpat = 2 - (ttblk == TT_8X4_TOP);
 3259  ttblk = TT_8X4;
 3260  }
 3261  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
 3262  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
 3263  ttblk = TT_4X8;
 3264  }
 /* One case per transform size: run-level decode coefficients in the
  * proper zigzag order, dequantize, then inverse-transform and add to dst
  * (DC-only fast path when a single coefficient was decoded, i == 1). */
 3265  switch (ttblk) {
 3266  case TT_8X8:
 3267  pat = 0xF;
 3268  i = 0;
 3269  last = 0;
 3270  while (!last) {
 3271  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3272  i += skip;
 3273  if (i > 63)
 3274  break;
 3275  if (!v->fcm)
 3276  idx = v->zz_8x8[0][i++];
 3277  else
 3278  idx = v->zzi_8x8[i++];
 3279  block[idx] = value * scale;
 3280  if (!v->pquantizer)
 3281  block[idx] += (block[idx] < 0) ? -mquant : mquant;
 3282  }
 3283  if (!skip_block) {
 3284  if (i == 1)
 3285  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
 3286  else {
 3287  v->vc1dsp.vc1_inv_trans_8x8(block);
 3288  s->idsp.add_pixels_clamped(block, dst, linesize);
 3289  }
 3290  }
 3291  break;
 3292  case TT_4X4:
 3293  pat = ~subblkpat & 0xF;
 3294  for (j = 0; j < 4; j++) {
 3295  last = subblkpat & (1 << (3 - j));
 3296  i = 0;
 3297  off = (j & 1) * 4 + (j & 2) * 16;
 3298  while (!last) {
 3299  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3300  i += skip;
 3301  if (i > 15)
 3302  break;
 /* NOTE(review): line 3304 (progressive 4x4 zigzag index assignment)
  * is missing from this listing. */
 3303  if (!v->fcm)
 3305  else
 3306  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
 3307  block[idx + off] = value * scale;
 3308  if (!v->pquantizer)
 3309  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
 3310  }
 3311  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
 3312  if (i == 1)
 3313  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
 3314  else
 3315  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
 3316  }
 3317  }
 3318  break;
 3319  case TT_8X4:
 3320  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
 3321  for (j = 0; j < 2; j++) {
 3322  last = subblkpat & (1 << (1 - j));
 3323  i = 0;
 3324  off = j * 32;
 3325  while (!last) {
 3326  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3327  i += skip;
 3328  if (i > 31)
 3329  break;
 3330  if (!v->fcm)
 3331  idx = v->zz_8x4[i++] + off;
 3332  else
 3333  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
 3334  block[idx] = value * scale;
 3335  if (!v->pquantizer)
 3336  block[idx] += (block[idx] < 0) ? -mquant : mquant;
 3337  }
 3338  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
 3339  if (i == 1)
 3340  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
 3341  else
 3342  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
 3343  }
 3344  }
 3345  break;
 3346  case TT_4X8:
 3347  pat = ~(subblkpat * 5) & 0xF;
 3348  for (j = 0; j < 2; j++) {
 3349  last = subblkpat & (1 << (1 - j));
 3350  i = 0;
 3351  off = j * 4;
 3352  while (!last) {
 3353  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
 3354  i += skip;
 3355  if (i > 31)
 3356  break;
 3357  if (!v->fcm)
 3358  idx = v->zz_4x8[i++] + off;
 3359  else
 3360  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
 3361  block[idx] = value * scale;
 3362  if (!v->pquantizer)
 3363  block[idx] += (block[idx] < 0) ? -mquant : mquant;
 3364  }
 3365  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
 3366  if (i == 1)
 3367  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
 3368  else
 3369  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
 3370  }
 3371  }
 3372  break;
 3373  }
 3374  if (ttmb_out)
 3375  *ttmb_out |= ttblk << (n * 4);
 3376  return pat;
 3377 }
3378  // Macroblock group
3380 
 /* Per-index bit counts and value offsets for escape-coded MV differentials;
  * NOTE(review): presumably consumed by the GET_MVDATA macro — confirm
  * against the macro definition (not visible in this chunk). */
 3381 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
 3382 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3383 
 /* Vertical in-loop deblocking for one block of a P frame, filtering the
  * edge against the macroblock one row above (and the internal horizontal
  * edge for 4x4/8x4 transforms).
  * NOTE(review): the function signature line (source line 3384) is missing
  * from this listing — presumably
  * static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v,
  *                                                        int block_num). */
 3385 {
 3386  MpegEncContext *s = &v->s;
 /* cbp/intra state of the MB one row above (the one being filtered). */
 3387  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
 3388  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
 3389  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
 3390  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
 3391  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
 3392  uint8_t *dst;
 3393 
 3394  if (block_num > 3) {
 3395  dst = s->dest[block_num - 3];
 3396  } else {
 3397  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
 3398  }
 /* Filter the bottom edge of this block against the block below it,
  * unless we are on the last MB row (then only the top half blocks). */
 3399  if (s->mb_y != s->end_mb_y || block_num < 2) {
 3400  int16_t (*mv)[2];
 3401  int mv_stride;
 3402 
 3403  if (block_num > 3) {
 3404  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
 3405  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
 3406  mv = &v->luma_mv[s->mb_x - s->mb_stride];
 3407  mv_stride = s->mb_stride;
 3408  } else {
 3409  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
 3410  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
 3411  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
 3412  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
 3413  mv_stride = s->b8_stride;
 3414  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
 3415  }
 3416 
 /* Always filter across intra blocks or differing MVs; otherwise only
  * the coded 4-pixel halves selected by the cbp bits. */
 3417  if (bottom_is_intra & 1 || block_is_intra & 1 ||
 3418  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
 3419  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
 3420  } else {
 3421  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
 3422  if (idx == 3) {
 3423  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
 3424  } else if (idx) {
 3425  if (idx == 1)
 3426  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
 3427  else
 3428  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
 3429  }
 3430  }
 3431  }
 3432 
 /* Internal horizontal edge: only present for 4x4 and 8x4 transforms. */
 3433  dst -= 4 * linesize;
 3434  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
 3435  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
 3436  idx = (block_cbp | (block_cbp >> 2)) & 3;
 3437  if (idx == 3) {
 3438  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
 3439  } else if (idx) {
 3440  if (idx == 1)
 3441  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
 3442  else
 3443  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
 3444  }
 3445  }
 3446 }
3447 
 /* Horizontal in-loop deblocking for one block of a P frame, filtering the
  * edge against the macroblock one position to the left on the previous row
  * (and the internal vertical edge for 4x4/4x8 transforms).
  * NOTE(review): the function signature line (source line 3448) is missing
  * from this listing — presumably
  * static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v,
  *                                                        int block_num). */
 3449 {
 3450  MpegEncContext *s = &v->s;
 /* cbp/intra state of the MB above-left (the one being filtered). */
 3451  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
 3452  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
 3453  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
 3454  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
 3455  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
 3456  uint8_t *dst;
 3457 
 3458  if (block_num > 3) {
 3459  dst = s->dest[block_num - 3] - 8 * linesize;
 3460  } else {
 3461  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
 3462  }
 3463 
 /* Filter the right edge against the neighbouring block to the right,
  * except at the right picture border (then only the left half blocks). */
 3464  if (s->mb_x != s->mb_width || !(block_num & 5)) {
 3465  int16_t (*mv)[2];
 3466 
 3467  if (block_num > 3) {
 3468  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
 3469  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
 3470  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
 3471  } else {
 3472  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
 3473  : (mb_cbp >> ((block_num + 1) * 4));
 3474  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
 3475  : (mb_is_intra >> ((block_num + 1) * 4));
 3476  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
 3477  }
 3478  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
 3479  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
 3480  } else {
 3481  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
 3482  if (idx == 5) {
 3483  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
 3484  } else if (idx) {
 3485  if (idx == 1)
 3486  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
 3487  else
 3488  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
 3489  }
 3490  }
 3491  }
 3492 
 /* Internal vertical edge: only present for 4x4 and 4x8 transforms. */
 3493  dst -= 4;
 3494  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
 3495  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
 3496  idx = (block_cbp | (block_cbp >> 1)) & 5;
 3497  if (idx == 5) {
 3498  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
 3499  } else if (idx) {
 3500  if (idx == 1)
 3501  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
 3502  else
 3503  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
 3504  }
 3505  }
 3506 }
3507 
 /* Run the P-frame in-loop deblocking for the current macroblock position.
  * NOTE(review): the signature line (source line 3508) is missing from this
  * listing — presumably
  * static void vc1_apply_p_loop_filter(VC1Context *v).
  * Lines 3514, 3521, 3525 and 3527 (the per-block calls to the v/h filter
  * helpers inside the loops) were also dropped by the listing. */
 3509 {
 3510  MpegEncContext *s = &v->s;
 3511  int i;
 3512 
 3513  for (i = 0; i < 6; i++) {
 3515  }
 3516 
 3517  /* V always precedes H, therefore we run H one MB before V;
 3518  * at the end of a row, we catch up to complete the row */
 3519  if (s->mb_x) {
 3520  for (i = 0; i < 6; i++) {
 3522  }
 3523  if (s->mb_x == s->mb_width - 1) {
 3524  s->mb_x++;
 3526  for (i = 0; i < 6; i++) {
 3528  }
 3529  }
 3530  }
 3531 }
3532 
 /* Decode one macroblock of a progressive P frame: reads the 1MV/4MV and
  * skip flags (raw or from bitplanes), motion vectors, CBPCY and quantizer,
  * then decodes/reconstructs each of the six blocks (intra or inter).
  * Stores per-MB cbp/transform/intra masks for the loop filter. Returns 0.
  * NOTE(review): the signature line (source line 3535) is missing from this
  * listing — presumably static int vc1_decode_p_mb(VC1Context *v).
  * Lines 3573, 3619, 3709 and 3730 were also dropped (see gaps below). */
 3536 {
 3537  MpegEncContext *s = &v->s;
 3538  GetBitContext *gb = &s->gb;
 3539  int i, j;
 3540  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 3541  int cbp; /* cbp decoding stuff */
 3542  int mqdiff, mquant; /* MB quantization */
 3543  int ttmb = v->ttfrm; /* MB Transform type */
 3544 
 3545  int mb_has_coeffs = 1; /* last_flag */
 3546  int dmv_x, dmv_y; /* Differential MV components */
 3547  int index, index1; /* LUT indexes */
 3548  int val, sign; /* temp values */
 3549  int first_block = 1;
 3550  int dst_idx, off;
 3551  int skipped, fourmv;
 3552  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
 3553 
 3554  mquant = v->pq; /* lossy initialization */
 3555 
 3556  if (v->mv_type_is_raw)
 3557  fourmv = get_bits1(gb);
 3558  else
 3559  fourmv = v->mv_type_mb_plane[mb_pos];
 3560  if (v->skip_is_raw)
 3561  skipped = get_bits1(gb);
 3562  else
 3563  skipped = v->s.mbskip_table[mb_pos];
 3564 
 3565  if (!fourmv) { /* 1MV mode */
 3566  if (!skipped) {
 3567  GET_MVDATA(dmv_x, dmv_y);
 3568 
 3569  if (s->mb_intra) {
 3570  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
 3571  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 3572  }
 /* NOTE(review): line 3573 is missing from this listing. */
 3574  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3575 
 3576  /* FIXME Set DC val for inter block ? */
 3577  if (s->mb_intra && !mb_has_coeffs) {
 3578  GET_MQUANT();
 3579  s->ac_pred = get_bits1(gb);
 3580  cbp = 0;
 3581  } else if (mb_has_coeffs) {
 3582  if (s->mb_intra)
 3583  s->ac_pred = get_bits1(gb);
 3584  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 3585  GET_MQUANT();
 3586  } else {
 3587  mquant = v->pq;
 3588  cbp = 0;
 3589  }
 3590  s->current_picture.qscale_table[mb_pos] = mquant;
 3591 
 3592  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
 3593  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
 3594  VC1_TTMB_VLC_BITS, 2);
 3595  if (!s->mb_intra) vc1_mc_1mv(v, 0);
 3596  dst_idx = 0;
 /* Per-block decode: intra blocks go through the intra path with AC
  * prediction and optional overlap smoothing; coded inter blocks through
  * vc1_decode_p_block. */
 3597  for (i = 0; i < 6; i++) {
 3598  s->dc_val[0][s->block_index[i]] = 0;
 3599  dst_idx += i >> 2;
 3600  val = ((cbp >> (5 - i)) & 1);
 3601  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 3602  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 3603  if (s->mb_intra) {
 3604  /* check if prediction blocks A and C are available */
 3605  v->a_avail = v->c_avail = 0;
 3606  if (i == 2 || i == 3 || !s->first_slice_line)
 3607  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 3608  if (i == 1 || i == 3 || s->mb_x)
 3609  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 3610 
 3611  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 3612  (i & 4) ? v->codingset2 : v->codingset);
 3613  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 3614  continue;
 3615  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 3616  if (v->rangeredfrm)
 3617  for (j = 0; j < 64; j++)
 3618  s->block[i][j] <<= 1;
 /* NOTE(review): line 3619 (the put_pixels call writing the block
  * to the destination) is missing from this listing. */
 3620  s->dest[dst_idx] + off,
 3621  i & 4 ? s->uvlinesize
 3622  : s->linesize);
 3623  if (v->pq >= 9 && v->overlap) {
 3624  if (v->c_avail)
 3625  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3626  if (v->a_avail)
 3627  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3628  }
 3629  block_cbp |= 0xF << (i << 2);
 3630  block_intra |= 1 << i;
 3631  } else if (val) {
 3632  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
 3633  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
 3634  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
 3635  block_cbp |= pat << (i << 2);
 3636  if (!v->ttmbf && ttmb < 8)
 3637  ttmb = -1;
 3638  first_block = 0;
 3639  }
 3640  }
 3641  } else { // skipped
 3642  s->mb_intra = 0;
 3643  for (i = 0; i < 6; i++) {
 3644  v->mb_type[0][s->block_index[i]] = 0;
 3645  s->dc_val[0][s->block_index[i]] = 0;
 3646  }
 3647  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
 3648  s->current_picture.qscale_table[mb_pos] = 0;
 3649  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3650  vc1_mc_1mv(v, 0);
 3651  }
 3652  } else { // 4MV mode
 3653  if (!skipped /* unskipped MB */) {
 3654  int intra_count = 0, coded_inter = 0;
 3655  int is_intra[6], is_coded[6];
 3656  /* Get CBPCY */
 3657  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 /* First pass: per-luma-block MVs and intra/coded status; chroma is
  * intra when at least 3 luma blocks are intra. */
 3658  for (i = 0; i < 6; i++) {
 3659  val = ((cbp >> (5 - i)) & 1);
 3660  s->dc_val[0][s->block_index[i]] = 0;
 3661  s->mb_intra = 0;
 3662  if (i < 4) {
 3663  dmv_x = dmv_y = 0;
 3664  s->mb_intra = 0;
 3665  mb_has_coeffs = 0;
 3666  if (val) {
 3667  GET_MVDATA(dmv_x, dmv_y);
 3668  }
 3669  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3670  if (!s->mb_intra)
 3671  vc1_mc_4mv_luma(v, i, 0, 0);
 3672  intra_count += s->mb_intra;
 3673  is_intra[i] = s->mb_intra;
 3674  is_coded[i] = mb_has_coeffs;
 3675  }
 3676  if (i & 4) {
 3677  is_intra[i] = (intra_count >= 3);
 3678  is_coded[i] = val;
 3679  }
 3680  if (i == 4)
 3681  vc1_mc_4mv_chroma(v, 0);
 3682  v->mb_type[0][s->block_index[i]] = is_intra[i];
 3683  if (!coded_inter)
 3684  coded_inter = !is_intra[i] & is_coded[i];
 3685  }
 3686  // if there are no coded blocks then don't do anything more
 3687  dst_idx = 0;
 3688  if (!intra_count && !coded_inter)
 3689  goto end;
 3690  GET_MQUANT();
 3691  s->current_picture.qscale_table[mb_pos] = mquant;
 3692  /* test if block is intra and has pred */
 3693  {
 3694  int intrapred = 0;
 3695  for (i = 0; i < 6; i++)
 3696  if (is_intra[i]) {
 3697  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
 3698  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
 3699  intrapred = 1;
 3700  break;
 3701  }
 3702  }
 3703  if (intrapred)
 3704  s->ac_pred = get_bits1(gb);
 3705  else
 3706  s->ac_pred = 0;
 3707  }
 /* NOTE(review): line 3709 (the TTMB VLC read) is missing from this
  * listing. */
 3708  if (!v->ttmbf && coded_inter)
 3710  for (i = 0; i < 6; i++) {
 3711  dst_idx += i >> 2;
 3712  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 3713  s->mb_intra = is_intra[i];
 3714  if (is_intra[i]) {
 3715  /* check if prediction blocks A and C are available */
 3716  v->a_avail = v->c_avail = 0;
 3717  if (i == 2 || i == 3 || !s->first_slice_line)
 3718  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 3719  if (i == 1 || i == 3 || s->mb_x)
 3720  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 3721 
 3722  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
 3723  (i & 4) ? v->codingset2 : v->codingset);
 3724  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
 3725  continue;
 3726  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 3727  if (v->rangeredfrm)
 3728  for (j = 0; j < 64; j++)
 3729  s->block[i][j] <<= 1;
 /* NOTE(review): line 3730 (the put_pixels call writing the block
  * to the destination) is missing from this listing. */
 3731  s->dest[dst_idx] + off,
 3732  (i & 4) ? s->uvlinesize
 3733  : s->linesize);
 3734  if (v->pq >= 9 && v->overlap) {
 3735  if (v->c_avail)
 3736  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3737  if (v->a_avail)
 3738  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
 3739  }
 3740  block_cbp |= 0xF << (i << 2);
 3741  block_intra |= 1 << i;
 3742  } else if (is_coded[i]) {
 3743  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 3744  first_block, s->dest[dst_idx] + off,
 3745  (i & 4) ? s->uvlinesize : s->linesize,
 3746  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
 3747  &block_tt);
 3748  block_cbp |= pat << (i << 2);
 3749  if (!v->ttmbf && ttmb < 8)
 3750  ttmb = -1;
 3751  first_block = 0;
 3752  }
 3753  }
 3754  } else { // skipped MB
 3755  s->mb_intra = 0;
 3756  s->current_picture.qscale_table[mb_pos] = 0;
 3757  for (i = 0; i < 6; i++) {
 3758  v->mb_type[0][s->block_index[i]] = 0;
 3759  s->dc_val[0][s->block_index[i]] = 0;
 3760  }
 3761  for (i = 0; i < 4; i++) {
 3762  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 3763  vc1_mc_4mv_luma(v, i, 0, 0);
 3764  }
 3765  vc1_mc_4mv_chroma(v, 0);
 3766  s->current_picture.qscale_table[mb_pos] = 0;
 3767  }
 3768  }
 3769 end:
 /* Publish per-MB masks consumed by the deblocking pass. */
 3770  v->cbp[s->mb_x] = block_cbp;
 3771  v->ttblk[s->mb_x] = block_tt;
 3772  v->is_intra[s->mb_x] = block_intra;
 3773 
 3774  return 0;
 3775 }
3776 
3777 /* Decode one macroblock in an interlaced frame p picture */
3778 
 /* Decode one macroblock of an interlaced-frame P picture: reads the MB
  * mode (1MV/2MV field/4MV variants or intra), field-transform flag,
  * motion vectors, CBPCY and quantizer, then reconstructs all six blocks.
  * Returns 0.
  * NOTE(review): the signature line (source line 3779) is missing from this
  * listing — presumably static int vc1_decode_p_mb_intfr(VC1Context *v).
  * Lines 3819, 3826, 3882, 3893, 3896-3897 and 3951 were also dropped by
  * the listing (see gaps below). */
 3780 {
 3781  MpegEncContext *s = &v->s;
 3782  GetBitContext *gb = &s->gb;
 3783  int i;
 3784  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 3785  int cbp = 0; /* cbp decoding stuff */
 3786  int mqdiff, mquant; /* MB quantization */
 3787  int ttmb = v->ttfrm; /* MB Transform type */
 3788 
 3789  int mb_has_coeffs = 1; /* last_flag */
 3790  int dmv_x, dmv_y; /* Differential MV components */
 3791  int val; /* temp value */
 3792  int first_block = 1;
 3793  int dst_idx, off;
 3794  int skipped, fourmv = 0, twomv = 0;
 3795  int block_cbp = 0, pat, block_tt = 0;
 3796  int idx_mbmode = 0, mvbp;
 3797  int stride_y, fieldtx;
 3798 
 3799  mquant = v->pq; /* Loosy initialization */
 3800 
 3801  if (v->skip_is_raw)
 3802  skipped = get_bits1(gb);
 3803  else
 3804  skipped = v->s.mbskip_table[mb_pos];
 3805  if (!skipped) {
 3806  if (v->fourmvswitch)
 3807  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
 3808  else
 3809  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
 /* NOTE(review): the case labels on lines 3819 and 3826 (presumably the
  * field-MV variants of 4MV and the 2MV field mode) are missing from
  * this listing. */
 3810  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
 3811  /* store the motion vector type in a flag (useful later) */
 3812  case MV_PMODE_INTFR_4MV:
 3813  fourmv = 1;
 3814  v->blk_mv_type[s->block_index[0]] = 0;
 3815  v->blk_mv_type[s->block_index[1]] = 0;
 3816  v->blk_mv_type[s->block_index[2]] = 0;
 3817  v->blk_mv_type[s->block_index[3]] = 0;
 3818  break;
 3820  fourmv = 1;
 3821  v->blk_mv_type[s->block_index[0]] = 1;
 3822  v->blk_mv_type[s->block_index[1]] = 1;
 3823  v->blk_mv_type[s->block_index[2]] = 1;
 3824  v->blk_mv_type[s->block_index[3]] = 1;
 3825  break;
 3827  twomv = 1;
 3828  v->blk_mv_type[s->block_index[0]] = 1;
 3829  v->blk_mv_type[s->block_index[1]] = 1;
 3830  v->blk_mv_type[s->block_index[2]] = 1;
 3831  v->blk_mv_type[s->block_index[3]] = 1;
 3832  break;
 3833  case MV_PMODE_INTFR_1MV:
 3834  v->blk_mv_type[s->block_index[0]] = 0;
 3835  v->blk_mv_type[s->block_index[1]] = 0;
 3836  v->blk_mv_type[s->block_index[2]] = 0;
 3837  v->blk_mv_type[s->block_index[3]] = 0;
 3838  break;
 3839  }
 3840  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
 3841  for (i = 0; i < 4; i++) {
 3842  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
 3843  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
 3844  }
 3845  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
 3846  s->mb_intra = v->is_intra[s->mb_x] = 1;
 3847  for (i = 0; i < 6; i++)
 3848  v->mb_type[0][s->block_index[i]] = 1;
 3849  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
 3850  mb_has_coeffs = get_bits1(gb);
 3851  if (mb_has_coeffs)
 3852  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 3853  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
 3854  GET_MQUANT();
 3855  s->current_picture.qscale_table[mb_pos] = mquant;
 3856  /* Set DC scale - y and c use the same (not sure if necessary here) */
 3857  s->y_dc_scale = s->y_dc_scale_table[mquant];
 3858  s->c_dc_scale = s->c_dc_scale_table[mquant];
 3859  dst_idx = 0;
 3860  for (i = 0; i < 6; i++) {
 3861  s->dc_val[0][s->block_index[i]] = 0;
 3862  dst_idx += i >> 2;
 3863  val = ((cbp >> (5 - i)) & 1);
 3864  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 3865  v->a_avail = v->c_avail = 0;
 3866  if (i == 2 || i == 3 || !s->first_slice_line)
 3867  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
 3868  if (i == 1 || i == 3 || s->mb_x)
 3869  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
 3870 
 3871  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
 3872  (i & 4) ? v->codingset2 : v->codingset);
 3873  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
 3874  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
 /* fieldtx interleaves the two fields: doubled luma stride and
  * field-aware destination offsets. */
 3875  if (i < 4) {
 3876  stride_y = s->linesize << fieldtx;
 3877  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
 3878  } else {
 3879  stride_y = s->uvlinesize;
 3880  off = 0;
 3881  }
 /* NOTE(review): line 3882 (the put_signed_pixels call writing the
  * block to the destination) is missing from this listing. */
 3883  s->dest[dst_idx] + off,
 3884  stride_y);
 3885  //TODO: loop filter
 3886  }
 3887 
 3888  } else { // inter MB
 3889  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
 3890  if (mb_has_coeffs)
 3891  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 /* NOTE(review): lines 3893 and 3896-3897 (reading the 2MV / 4MV
  * block pattern from the bitstream) are missing from this listing. */
 3892  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
 3894  } else {
 3895  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
 3896  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
 3898  }
 3899  }
 3900  s->mb_intra = v->is_intra[s->mb_x] = 0;
 3901  for (i = 0; i < 6; i++)
 3902  v->mb_type[0][s->block_index[i]] = 0;
 3903  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
 3904  /* for all motion vector read MVDATA and motion compensate each block */
 3905  dst_idx = 0;
 3906  if (fourmv) {
 3907  mvbp = v->fourmvbp;
 3908  for (i = 0; i < 6; i++) {
 3909  if (i < 4) {
 3910  dmv_x = dmv_y = 0;
 3911  val = ((mvbp >> (3 - i)) & 1);
 3912  if (val) {
 3913  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 3914  }
 3915  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
 3916  vc1_mc_4mv_luma(v, i, 0, 0);
 3917  } else if (i == 4) {
 3918  vc1_mc_4mv_chroma4(v, 0, 0, 0);
 3919  }
 3920  }
 3921  } else if (twomv) {
 /* One MV per field pair: top-field MV drives blocks 0/1, bottom
  * drives 2/3. */
 3922  mvbp = v->twomvbp;
 3923  dmv_x = dmv_y = 0;
 3924  if (mvbp & 2) {
 3925  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 3926  }
 3927  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
 3928  vc1_mc_4mv_luma(v, 0, 0, 0);
 3929  vc1_mc_4mv_luma(v, 1, 0, 0);
 3930  dmv_x = dmv_y = 0;
 3931  if (mvbp & 1) {
 3932  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 3933  }
 3934  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
 3935  vc1_mc_4mv_luma(v, 2, 0, 0);
 3936  vc1_mc_4mv_luma(v, 3, 0, 0);
 3937  vc1_mc_4mv_chroma4(v, 0, 0, 0);
 3938  } else {
 3939  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
 3940  dmv_x = dmv_y = 0;
 3941  if (mvbp) {
 3942  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
 3943  }
 3944  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
 3945  vc1_mc_1mv(v, 0);
 3946  }
 3947  if (cbp)
 3948  GET_MQUANT(); // p. 227
 3949  s->current_picture.qscale_table[mb_pos] = mquant;
 /* NOTE(review): line 3951 (the TTMB VLC read) is missing from this
  * listing. */
 3950  if (!v->ttmbf && cbp)
 3952  for (i = 0; i < 6; i++) {
 3953  s->dc_val[0][s->block_index[i]] = 0;
 3954  dst_idx += i >> 2;
 3955  val = ((cbp >> (5 - i)) & 1);
 3956  if (!fieldtx)
 3957  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 3958  else
 3959  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
 3960  if (val) {
 3961  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
 3962  first_block, s->dest[dst_idx] + off,
 3963  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
 3964  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
 3965  block_cbp |= pat << (i << 2);
 3966  if (!v->ttmbf && ttmb < 8)
 3967  ttmb = -1;
 3968  first_block = 0;
 3969  }
 3970  }
 3971  }
 3972  } else { // skipped
 3973  s->mb_intra = v->is_intra[s->mb_x] = 0;
 3974  for (i = 0; i < 6; i++) {
 3975  v->mb_type[0][s->block_index[i]] = 0;
 3976  s->dc_val[0][s->block_index[i]] = 0;
 3977  }
 3978  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
 3979  s->current_picture.qscale_table[mb_pos] = 0;
 3980  v->blk_mv_type[s->block_index[0]] = 0;
 3981  v->blk_mv_type[s->block_index[1]] = 0;
 3982  v->blk_mv_type[s->block_index[2]] = 0;
 3983  v->blk_mv_type[s->block_index[3]] = 0;
 3984  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
 3985  vc1_mc_1mv(v, 0);
 3986  }
 3987  if (s->mb_x == s->mb_width - 1)
 3988  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
 3989  return 0;
 3990 }
3991 
/* NOTE(review): doxygen-extracted listing. The function signature line (3992) and
 * several statement lines that were hyperlinks (4046, 4065, 4087) are missing from
 * this capture. Presumably this is vc1_decode_p_mb_intfi() — decode one macroblock
 * of an interlaced-field P picture — TODO confirm against upstream vc1dec.c.
 * Flow visible here: read the MB-mode VLC; intra path decodes 6 intra blocks with
 * AC prediction, inter path does 1-MV or 4-MV motion compensation followed by
 * residual decoding of the coded blocks (cbp). Returns 0. */
3993 {
3994  MpegEncContext *s = &v->s;
3995  GetBitContext *gb = &s->gb;
3996  int i;
3997  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3998  int cbp = 0; /* cbp decoding stuff */
3999  int mqdiff, mquant; /* MB quantization */
4000  int ttmb = v->ttfrm; /* MB Transform type */
4001 
4002  int mb_has_coeffs = 1; /* last_flag */
4003  int dmv_x, dmv_y; /* Differential MV components */
4004  int val; /* temp values */
4005  int first_block = 1;
4006  int dst_idx, off;
4007  int pred_flag;
4008  int block_cbp = 0, pat, block_tt = 0;
4009  int idx_mbmode = 0;
4010 
4011  mquant = v->pq; /* Loosy initialization */
4012 
4013  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4014  if (idx_mbmode <= 1) { // intra MB
4015  s->mb_intra = v->is_intra[s->mb_x] = 1;
4016  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4017  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4018  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4019  GET_MQUANT();
4020  s->current_picture.qscale_table[mb_pos] = mquant;
4021  /* Set DC scale - y and c use the same (not sure if necessary here) */
4022  s->y_dc_scale = s->y_dc_scale_table[mquant];
4023  s->c_dc_scale = s->c_dc_scale_table[mquant];
4024  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4025  mb_has_coeffs = idx_mbmode & 1;
4026  if (mb_has_coeffs)
4027  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4028  dst_idx = 0;
4029  for (i = 0; i < 6; i++) {
4030  s->dc_val[0][s->block_index[i]] = 0;
4031  v->mb_type[0][s->block_index[i]] = 1;
4032  dst_idx += i >> 2;
4033  val = ((cbp >> (5 - i)) & 1);
/* availability of the above (A) and left (C) predictor blocks for DC/AC prediction */
4034  v->a_avail = v->c_avail = 0;
4035  if (i == 2 || i == 3 || !s->first_slice_line)
4036  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4037  if (i == 1 || i == 3 || s->mb_x)
4038  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4039 
4040  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4041  (i & 4) ? v->codingset2 : v->codingset);
4042  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4043  continue;
4044  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4045  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* NOTE(review): line 4046 missing from capture — presumably the
 * put_signed_pixels_clamped() call these arguments belong to. */
4047  s->dest[dst_idx] + off,
4048  (i & 4) ? s->uvlinesize
4049  : s->linesize);
4050  // TODO: loop filter
4051  }
4052  } else {
4053  s->mb_intra = v->is_intra[s->mb_x] = 0;
4054  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4055  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4056  if (idx_mbmode <= 5) { // 1-MV
4057  dmv_x = dmv_y = pred_flag = 0;
4058  if (idx_mbmode & 1) {
4059  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4060  }
4061  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4062  vc1_mc_1mv(v, 0);
4063  mb_has_coeffs = !(idx_mbmode & 2);
4064  } else { // 4-MV
/* NOTE(review): line 4065 missing from capture — presumably the read of
 * v->fourmvbp (4MV block pattern VLC) used in the loop below. */
4066  for (i = 0; i < 6; i++) {
4067  if (i < 4) {
4068  dmv_x = dmv_y = pred_flag = 0;
4069  val = ((v->fourmvbp >> (3 - i)) & 1);
4070  if (val) {
4071  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4072  }
4073  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4074  vc1_mc_4mv_luma(v, i, 0, 0);
4075  } else if (i == 4)
4076  vc1_mc_4mv_chroma(v, 0);
4077  }
4078  mb_has_coeffs = idx_mbmode & 1;
4079  }
4080  if (mb_has_coeffs)
4081  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4082  if (cbp) {
4083  GET_MQUANT();
4084  }
4085  s->current_picture.qscale_table[mb_pos] = mquant;
4086  if (!v->ttmbf && cbp) {
/* NOTE(review): line 4087 missing from capture — presumably the per-MB
 * transform-type (TTMB) VLC read into ttmb. */
4088  }
4089  dst_idx = 0;
4090  for (i = 0; i < 6; i++) {
4091  s->dc_val[0][s->block_index[i]] = 0;
4092  dst_idx += i >> 2;
4093  val = ((cbp >> (5 - i)) & 1);
4094  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4095  if (val) {
4096  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4097  first_block, s->dest[dst_idx] + off,
4098  (i & 4) ? s->uvlinesize : s->linesize,
4099  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4100  &block_tt);
4101  block_cbp |= pat << (i << 2);
/* after the first explicitly-signalled block, ttmb < 8 means the
 * remaining blocks re-read their transform type */
4102  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4103  first_block = 0;
4104  }
4105  }
4106  }
/* propagate the per-row intra flags once the row is finished */
4107  if (s->mb_x == s->mb_width - 1)
4108  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4109  return 0;
4110 }
4111 
/* NOTE(review): doxygen-extracted listing. The function signature (lines 4112-4114)
 * and hyperlinked statement lines 4188, 4225, 4251 are missing from this capture.
 * Presumably this is vc1_decode_b_mb() — decode one macroblock of a progressive
 * B picture — TODO confirm against upstream vc1dec.c.
 * Visible flow: read/lookup DIRECT and SKIP flags, derive the B MV type
 * (forward/backward/interpolated) from bfraction, predict and motion-compensate,
 * then decode residual blocks (intra blocks with range reduction, inter via
 * vc1_decode_p_block). void return. */
4115 {
4116  MpegEncContext *s = &v->s;
4117  GetBitContext *gb = &s->gb;
4118  int i, j;
4119  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4120  int cbp = 0; /* cbp decoding stuff */
4121  int mqdiff, mquant; /* MB quantization */
4122  int ttmb = v->ttfrm; /* MB Transform type */
4123  int mb_has_coeffs = 0; /* last_flag */
4124  int index, index1; /* LUT indexes */
4125  int val, sign; /* temp values */
4126  int first_block = 1;
4127  int dst_idx, off;
4128  int skipped, direct;
4129  int dmv_x[2], dmv_y[2];
4130  int bmvtype = BMV_TYPE_BACKWARD;
4131 
4132  mquant = v->pq; /* lossy initialization */
4133  s->mb_intra = 0;
4134 
/* DIRECT / SKIP flags: read from the bitstream when raw-coded,
 * otherwise from the decoded bitplanes */
4135  if (v->dmb_is_raw)
4136  direct = get_bits1(gb);
4137  else
4138  direct = v->direct_mb_plane[mb_pos];
4139  if (v->skip_is_raw)
4140  skipped = get_bits1(gb);
4141  else
4142  skipped = v->s.mbskip_table[mb_pos];
4143 
4144  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4145  for (i = 0; i < 6; i++) {
4146  v->mb_type[0][s->block_index[i]] = 0;
4147  s->dc_val[0][s->block_index[i]] = 0;
4148  }
4149  s->current_picture.qscale_table[mb_pos] = 0;
4150 
4151  if (!direct) {
4152  if (!skipped) {
4153  GET_MVDATA(dmv_x[0], dmv_y[0]);
4154  dmv_x[1] = dmv_x[0];
4155  dmv_y[1] = dmv_y[0];
4156  }
4157  if (skipped || !s->mb_intra) {
/* map the 0/1/2 code onto forward/backward depending on which
 * reference is temporally closer (bfraction vs 1/2) */
4158  bmvtype = decode012(gb);
4159  switch (bmvtype) {
4160  case 0:
4161  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4162  break;
4163  case 1:
4164  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4165  break;
4166  case 2:
4167  bmvtype = BMV_TYPE_INTERPOLATED;
4168  dmv_x[0] = dmv_y[0] = 0;
4169  }
4170  }
4171  }
4172  for (i = 0; i < 6; i++)
4173  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4174 
4175  if (skipped) {
4176  if (direct)
4177  bmvtype = BMV_TYPE_INTERPOLATED;
4178  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4179  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4180  return;
4181  }
4182  if (direct) {
4183  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4184  GET_MQUANT();
4185  s->mb_intra = 0;
4186  s->current_picture.qscale_table[mb_pos] = mquant;
4187  if (!v->ttmbf)
/* NOTE(review): line 4188 missing from capture — presumably the TTMB
 * transform-type VLC read that this if() guards. */
4189  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4190  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4191  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4192  } else {
4193  if (!mb_has_coeffs && !s->mb_intra) {
4194  /* no coded blocks - effectively skipped */
4195  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4196  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4197  return;
4198  }
4199  if (s->mb_intra && !mb_has_coeffs) {
4200  GET_MQUANT();
4201  s->current_picture.qscale_table[mb_pos] = mquant;
4202  s->ac_pred = get_bits1(gb);
4203  cbp = 0;
4204  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4205  } else {
4206  if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated MBs carry a second MV delta (backward) */
4207  GET_MVDATA(dmv_x[0], dmv_y[0]);
4208  if (!mb_has_coeffs) {
4209  /* interpolated skipped block */
4210  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4211  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4212  return;
4213  }
4214  }
4215  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4216  if (!s->mb_intra) {
4217  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4218  }
4219  if (s->mb_intra)
4220  s->ac_pred = get_bits1(gb);
4221  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4222  GET_MQUANT();
4223  s->current_picture.qscale_table[mb_pos] = mquant;
4224  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
/* NOTE(review): line 4225 missing from capture — presumably the TTMB
 * transform-type VLC read that this if() guards. */
4226  }
4227  }
/* residual decoding for the 6 blocks (4 luma + 2 chroma) */
4228  dst_idx = 0;
4229  for (i = 0; i < 6; i++) {
4230  s->dc_val[0][s->block_index[i]] = 0;
4231  dst_idx += i >> 2;
4232  val = ((cbp >> (5 - i)) & 1);
4233  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4234  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4235  if (s->mb_intra) {
4236  /* check if prediction blocks A and C are available */
4237  v->a_avail = v->c_avail = 0;
4238  if (i == 2 || i == 3 || !s->first_slice_line)
4239  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4240  if (i == 1 || i == 3 || s->mb_x)
4241  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4242 
4243  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4244  (i & 4) ? v->codingset2 : v->codingset);
4245  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4246  continue;
4247  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* range-reduced frame: scale coefficients back up */
4248  if (v->rangeredfrm)
4249  for (j = 0; j < 64; j++)
4250  s->block[i][j] <<= 1;
/* NOTE(review): line 4251 missing from capture — presumably the
 * put_signed_pixels_clamped() call these arguments belong to. */
4252  s->dest[dst_idx] + off,
4253  i & 4 ? s->uvlinesize
4254  : s->linesize);
4255  } else if (val) {
4256  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4257  first_block, s->dest[dst_idx] + off,
4258  (i & 4) ? s->uvlinesize : s->linesize,
4259  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4260  if (!v->ttmbf && ttmb < 8)
4261  ttmb = -1;
4262  first_block = 0;
4263  }
4264  }
4265 }
4266 
/* NOTE(review): doxygen-extracted listing. The function signature (lines 4267-4269)
 * and hyperlinked statement lines 4326, 4377, 4402 are missing from this capture.
 * Presumably this is vc1_decode_b_mb_intfi() — decode one macroblock of an
 * interlaced-field B picture — TODO confirm against upstream vc1dec.c.
 * Visible flow: MB-mode VLC selects intra vs inter; inter path reads the
 * forward-MB flag, then does 1-MV (forward/backward/direct/interpolated)
 * or 4-MV prediction + MC, then residual decoding. void return. */
4270 {
4271  MpegEncContext *s = &v->s;
4272  GetBitContext *gb = &s->gb;
4273  int i, j;
4274  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4275  int cbp = 0; /* cbp decoding stuff */
4276  int mqdiff, mquant; /* MB quantization */
4277  int ttmb = v->ttfrm; /* MB Transform type */
4278  int mb_has_coeffs = 0; /* last_flag */
4279  int val; /* temp value */
4280  int first_block = 1;
4281  int dst_idx, off;
4282  int fwd;
4283  int dmv_x[2], dmv_y[2], pred_flag[2];
4284  int bmvtype = BMV_TYPE_BACKWARD;
4285  int idx_mbmode, interpmvp;
4286 
4287  mquant = v->pq; /* Loosy initialization */
4288  s->mb_intra = 0;
4289 
4290  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4291  if (idx_mbmode <= 1) { // intra MB
4292  s->mb_intra = v->is_intra[s->mb_x] = 1;
4293  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4294  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4295  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4296  GET_MQUANT();
4297  s->current_picture.qscale_table[mb_pos] = mquant;
4298  /* Set DC scale - y and c use the same (not sure if necessary here) */
4299  s->y_dc_scale = s->y_dc_scale_table[mquant];
4300  s->c_dc_scale = s->c_dc_scale_table[mquant];
4301  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4302  mb_has_coeffs = idx_mbmode & 1;
4303  if (mb_has_coeffs)
4304  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4305  dst_idx = 0;
4306  for (i = 0; i < 6; i++) {
4307  s->dc_val[0][s->block_index[i]] = 0;
4308  dst_idx += i >> 2;
4309  val = ((cbp >> (5 - i)) & 1);
4310  v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* availability of above (A) and left (C) predictors for intra prediction */
4311  v->a_avail = v->c_avail = 0;
4312  if (i == 2 || i == 3 || !s->first_slice_line)
4313  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4314  if (i == 1 || i == 3 || s->mb_x)
4315  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4316 
4317  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4318  (i & 4) ? v->codingset2 : v->codingset);
4319  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4320  continue;
4321  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4322  if (v->rangeredfrm)
4323  for (j = 0; j < 64; j++)
4324  s->block[i][j] <<= 1;
4325  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* NOTE(review): line 4326 missing from capture — presumably the
 * put_signed_pixels_clamped() call these arguments belong to. */
4327  s->dest[dst_idx] + off,
4328  (i & 4) ? s->uvlinesize
4329  : s->linesize);
4330  // TODO: yet to perform loop filter
4331  }
4332  } else {
4333  s->mb_intra = v->is_intra[s->mb_x] = 0;
4334  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4335  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4336  if (v->fmb_is_raw)
4337  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4338  else
4339  fwd = v->forward_mb_plane[mb_pos];
4340  if (idx_mbmode <= 5) { // 1-MV
4341  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4342  pred_flag[0] = pred_flag[1] = 0;
4343  if (fwd)
4344  bmvtype = BMV_TYPE_FORWARD;
4345  else {
4346  bmvtype = decode012(gb);
4347  switch (bmvtype) {
4348  case 0:
4349  bmvtype = BMV_TYPE_BACKWARD;
4350  break;
4351  case 1:
4352  bmvtype = BMV_TYPE_DIRECT;
4353  break;
4354  case 2:
4355  bmvtype = BMV_TYPE_INTERPOLATED;
/* interpmvp is only assigned on this path; it is read below only when
 * bmvtype == BMV_TYPE_INTERPOLATED, so the short-circuit keeps it safe */
4356  interpmvp = get_bits1(gb);
4357  }
4358  }
4359  v->bmvtype = bmvtype;
4360  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4361  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4362  }
4363  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4364  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4365  }
4366  if (bmvtype == BMV_TYPE_DIRECT) {
4367  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
/* NOTE(review): pred_flag[0] is zeroed twice here — pred_flag[1] was
 * presumably intended, though both are already 0 from line 4342, so the
 * effect is the same. Confirm against upstream. */
4368  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4369  }
4370  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4371  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4372  mb_has_coeffs = !(idx_mbmode & 2);
4373  } else { // 4-MV
4374  if (fwd)
4375  bmvtype = BMV_TYPE_FORWARD;
4376  v->bmvtype = bmvtype;
/* NOTE(review): line 4377 missing from capture — presumably the read of
 * v->fourmvbp (4MV block pattern VLC) used in the loop below. */
4378  for (i = 0; i < 6; i++) {
4379  if (i < 4) {
4380  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4381  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4382  val = ((v->fourmvbp >> (3 - i)) & 1);
4383  if (val) {
4384  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4385  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4386  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4387  }
4388  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4389  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4390  } else if (i == 4)
4391  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4392  }
4393  mb_has_coeffs = idx_mbmode & 1;
4394  }
4395  if (mb_has_coeffs)
4396  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4397  if (cbp) {
4398  GET_MQUANT();
4399  }
4400  s->current_picture.qscale_table[mb_pos] = mquant;
4401  if (!v->ttmbf && cbp) {
/* NOTE(review): line 4402 missing from capture — presumably the TTMB
 * transform-type VLC read that this if() guards. */
4403  }
4404  dst_idx = 0;
4405  for (i = 0; i < 6; i++) {
4406  s->dc_val[0][s->block_index[i]] = 0;
4407  dst_idx += i >> 2;
4408  val = ((cbp >> (5 - i)) & 1);
4409  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4410  if (val) {
4411  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4412  first_block, s->dest[dst_idx] + off,
4413  (i & 4) ? s->uvlinesize : s->linesize,
4414  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4415  if (!v->ttmbf && ttmb < 8)
4416  ttmb = -1;
4417  first_block = 0;
4418  }
4419  }
4420  }
4421 }
4422 
/* NOTE(review): doxygen-extracted listing. The function signature (lines 4423-4425)
 * and hyperlinked statement lines 4548, 4579, 4581, 4691 are missing from this
 * capture. Presumably this is vc1_decode_b_mb_intfr() — decode one macroblock of an
 * interlaced-frame B picture — TODO confirm against upstream vc1dec.c.
 * Visible flow: read/lookup SKIP and DIRECT flags, derive MVs for DIRECT MBs by
 * scaling the co-located next-picture MVs with bfraction, then branch on
 * intra / inter(1MV,2MV-field,interpolated) / skipped, motion-compensate and
 * decode residuals. Returns 0 and records per-MB cbp/tt for loop filtering. */
4426 {
4427  MpegEncContext *s = &v->s;
4428  GetBitContext *gb = &s->gb;
4429  int i, j;
4430  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4431  int cbp = 0; /* cbp decoding stuff */
4432  int mqdiff, mquant; /* MB quantization */
4433  int ttmb = v->ttfrm; /* MB Transform type */
4434  int mvsw = 0; /* motion vector switch */
4435  int mb_has_coeffs = 1; /* last_flag */
4436  int dmv_x, dmv_y; /* Differential MV components */
4437  int val; /* temp value */
4438  int first_block = 1;
4439  int dst_idx, off;
4440  int skipped, direct, twomv = 0;
4441  int block_cbp = 0, pat, block_tt = 0;
4442  int idx_mbmode = 0, mvbp;
4443  int stride_y, fieldtx;
4444  int bmvtype = BMV_TYPE_BACKWARD;
4445  int dir, dir2;
4446 
4447  mquant = v->pq; /* Lossy initialization */
4448  s->mb_intra = 0;
4449  if (v->skip_is_raw)
4450  skipped = get_bits1(gb);
4451  else
4452  skipped = v->s.mbskip_table[mb_pos];
4453 
4454  if (!skipped) {
/* MB mode decides 2MV-field vs other coding; blk_mv_type marks blocks
 * using field (1) vs frame (0) motion vectors */
4455  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4456  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4457  twomv = 1;
4458  v->blk_mv_type[s->block_index[0]] = 1;
4459  v->blk_mv_type[s->block_index[1]] = 1;
4460  v->blk_mv_type[s->block_index[2]] = 1;
4461  v->blk_mv_type[s->block_index[3]] = 1;
4462  } else {
4463  v->blk_mv_type[s->block_index[0]] = 0;
4464  v->blk_mv_type[s->block_index[1]] = 0;
4465  v->blk_mv_type[s->block_index[2]] = 0;
4466  v->blk_mv_type[s->block_index[3]] = 0;
4467  }
4468  }
4469 
4470  if (v->dmb_is_raw)
4471  direct = get_bits1(gb);
4472  else
4473  direct = v->direct_mb_plane[mb_pos];
4474 
4475  if (direct) {
/* DIRECT: derive both forward (list 0) and backward (list 1) MVs from the
 * co-located MV in the next picture, scaled by bfraction */
4476  s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4477  s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4478  s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4479  s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4480 
4481  if (twomv) {
4482  s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4483  s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4484  s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4485  s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4486 
/* blocks 1 and 3 copy the MVs of blocks 0 and 2 (left neighbours) */
4487  for (i = 1; i < 4; i += 2) {
4488  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4489  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4490  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4491  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4492  }
4493  } else {
/* 1-MV direct: replicate block 0's MVs into all four luma blocks */
4494  for (i = 1; i < 4; i++) {
4495  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4496  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4497  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4498  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4499  }
4500  }
4501  }
4502 
4503  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4504  for (i = 0; i < 4; i++) {
4505  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4506  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4507  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4508  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4509  }
4510  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4511  s->mb_intra = v->is_intra[s->mb_x] = 1;
4512  for (i = 0; i < 6; i++)
4513  v->mb_type[0][s->block_index[i]] = 1;
4514  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4515  mb_has_coeffs = get_bits1(gb);
4516  if (mb_has_coeffs)
4517  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4518  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4519  GET_MQUANT();
4520  s->current_picture.qscale_table[mb_pos] = mquant;
4521  /* Set DC scale - y and c use the same (not sure if necessary here) */
4522  s->y_dc_scale = s->y_dc_scale_table[mquant];
4523  s->c_dc_scale = s->c_dc_scale_table[mquant];
4524  dst_idx = 0;
4525  for (i = 0; i < 6; i++) {
4526  s->dc_val[0][s->block_index[i]] = 0;
4527  dst_idx += i >> 2;
4528  val = ((cbp >> (5 - i)) & 1);
4529  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4530  v->a_avail = v->c_avail = 0;
4531  if (i == 2 || i == 3 || !s->first_slice_line)
4532  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4533  if (i == 1 || i == 3 || s->mb_x)
4534  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4535 
4536  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4537  (i & 4) ? v->codingset2 : v->codingset);
4538  if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4539  continue;
4540  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* field-transform MBs interleave the two fields: doubled luma stride
 * and a different in-MB offset */
4541  if (i < 4) {
4542  stride_y = s->linesize << fieldtx;
4543  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4544  } else {
4545  stride_y = s->uvlinesize;
4546  off = 0;
4547  }
/* NOTE(review): line 4548 missing from capture — presumably the
 * put_signed_pixels_clamped() call these arguments belong to. */
4549  s->dest[dst_idx] + off,
4550  stride_y);
4551  }
4552  } else {
4553  s->mb_intra = v->is_intra[s->mb_x] = 0;
4554  if (!direct) {
4555  if (skipped || !s->mb_intra) {
4556  bmvtype = decode012(gb);
4557  switch (bmvtype) {
4558  case 0:
4559  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4560  break;
4561  case 1:
4562  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4563  break;
4564  case 2:
4565  bmvtype = BMV_TYPE_INTERPOLATED;
4566  }
4567  }
4568 
4569  if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4570  mvsw = get_bits1(gb);
4571  }
4572 
4573  if (!skipped) { // inter MB
4574  mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4575  if (mb_has_coeffs)
4576  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4577  if (!direct) {
4578  if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
/* NOTE(review): line 4579 missing — presumably the read of v->fourmvbp. */
4580  } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
/* NOTE(review): line 4581 missing — presumably the read of v->twomvbp. */
4582  }
4583  }
4584 
4585  for (i = 0; i < 6; i++)
4586  v->mb_type[0][s->block_index[i]] = 0;
4587  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4588  /* for all motion vector read MVDATA and motion compensate each block */
4589  dst_idx = 0;
4590  if (direct) {
4591  if (twomv) {
4592  for (i = 0; i < 4; i++) {
4593  vc1_mc_4mv_luma(v, i, 0, 0);
4594  vc1_mc_4mv_luma(v, i, 1, 1);
4595  }
4596  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4597  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4598  } else {
4599  vc1_mc_1mv(v, 0);
4600  vc1_interp_mc(v);
4601  }
4602  } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated 2MV-field: four field MVs, alternating fwd/bwd by field */
4603  mvbp = v->fourmvbp;
4604  for (i = 0; i < 4; i++) {
4605  dir = i==1 || i==3;
4606  dmv_x = dmv_y = 0;
4607  val = ((mvbp >> (3 - i)) & 1);
4608  if (val)
4609  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4610  j = i > 1 ? 2 : 0;
4611  vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4612  vc1_mc_4mv_luma(v, j, dir, dir);
4613  vc1_mc_4mv_luma(v, j+1, dir, dir);
4614  }
4615 
4616  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4617  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4618  } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4619  mvbp = v->twomvbp;
4620  dmv_x = dmv_y = 0;
4621  if (mvbp & 2)
4622  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4623 
4624  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4625  vc1_mc_1mv(v, 0);
4626 
4627  dmv_x = dmv_y = 0;
4628  if (mvbp & 1)
4629  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4630 
4631  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4632  vc1_interp_mc(v);
4633  } else if (twomv) {
/* 2MV-field fwd-or-bwd: mvsw swaps the direction of the bottom field */
4634  dir = bmvtype == BMV_TYPE_BACKWARD;
4635  dir2 = dir;
4636  if (mvsw)
4637  dir2 = !dir;
4638  mvbp = v->twomvbp;
4639  dmv_x = dmv_y = 0;
4640  if (mvbp & 2)
4641  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4642  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4643 
4644  dmv_x = dmv_y = 0;
4645  if (mvbp & 1)
4646  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4647  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4648 
4649  if (mvsw) {
/* cross-copy top/bottom field MVs between the two directions */
4650  for (i = 0; i < 2; i++) {
4651  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4652  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4653  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4654  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4655  }
4656  } else {
/* still predict the unused direction so neighbours see sane MVs */
4657  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4658  vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4659  }
4660 
4661  vc1_mc_4mv_luma(v, 0, dir, 0);
4662  vc1_mc_4mv_luma(v, 1, dir, 0);
4663  vc1_mc_4mv_luma(v, 2, dir2, 0);
4664  vc1_mc_4mv_luma(v, 3, dir2, 0);
4665  vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4666  } else {
/* single-MV fwd-or-bwd */
4667  dir = bmvtype == BMV_TYPE_BACKWARD;
4668 
4669  mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4670  dmv_x = dmv_y = 0;
4671  if (mvbp)
4672  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4673 
4674  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4675  v->blk_mv_type[s->block_index[0]] = 1;
4676  v->blk_mv_type[s->block_index[1]] = 1;
4677  v->blk_mv_type[s->block_index[2]] = 1;
4678  v->blk_mv_type[s->block_index[3]] = 1;
4679  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4680  for (i = 0; i < 2; i++) {
4681  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4682  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4683  }
4684  vc1_mc_1mv(v, dir);
4685  }
4686 
4687  if (cbp)
4688  GET_MQUANT(); // p. 227
4689  s->current_picture.qscale_table[mb_pos] = mquant;
4690  if (!v->ttmbf && cbp)
/* NOTE(review): line 4691 missing from capture — presumably the TTMB
 * transform-type VLC read that this if() guards. */
4692  for (i = 0; i < 6; i++) {
4693  s->dc_val[0][s->block_index[i]] = 0;
4694  dst_idx += i >> 2;
4695  val = ((cbp >> (5 - i)) & 1);
4696  if (!fieldtx)
4697  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4698  else
4699  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4700  if (val) {
4701  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4702  first_block, s->dest[dst_idx] + off,
4703  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4704  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4705  block_cbp |= pat << (i << 2);
4706  if (!v->ttmbf && ttmb < 8)
4707  ttmb = -1;
4708  first_block = 0;
4709  }
4710  }
4711 
4712  } else { // skipped
4713  dir = 0;
4714  for (i = 0; i < 6; i++) {
4715  v->mb_type[0][s->block_index[i]] = 0;
4716  s->dc_val[0][s->block_index[i]] = 0;
4717  }
4718  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4719  s->current_picture.qscale_table[mb_pos] = 0;
4720  v->blk_mv_type[s->block_index[0]] = 0;
4721  v->blk_mv_type[s->block_index[1]] = 0;
4722  v->blk_mv_type[s->block_index[2]] = 0;
4723  v->blk_mv_type[s->block_index[3]] = 0;
4724 
4725  if (!direct) {
4726  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4727  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4728  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4729  } else {
4730  dir = bmvtype == BMV_TYPE_BACKWARD;
4731  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4732  if (mvsw) {
/* inner "if (mvsw)" is redundant inside this branch — dir2 is always !dir here */
4733  int dir2 = dir;
4734  if (mvsw)
4735  dir2 = !dir;
4736  for (i = 0; i < 2; i++) {
4737  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4738  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4739  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4740  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4741  }
4742  } else {
4743  v->blk_mv_type[s->block_index[0]] = 1;
4744  v->blk_mv_type[s->block_index[1]] = 1;
4745  v->blk_mv_type[s->block_index[2]] = 1;
4746  v->blk_mv_type[s->block_index[3]] = 1;
4747  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4748  for (i = 0; i < 2; i++) {
4749  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4750  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4751  }
4752  }
4753  }
4754  }
4755 
4756  vc1_mc_1mv(v, dir);
4757  if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4758  vc1_interp_mc(v);
4759  }
4760  }
4761  }
4762  if (s->mb_x == s->mb_width - 1)
4763  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
/* stash cbp / transform-type for the in-loop deblocking pass */
4764  v->cbp[s->mb_x] = block_cbp;
4765  v->ttblk[s->mb_x] = block_tt;
4766  return 0;
4767 }
4768 
/* Decode all intra macroblocks of an I-frame (simple/main profile).
 * Picks AC coding sets from y/c_ac_table_index, then for every MB decodes
 * the 4 luma + 2 chroma 8x8 blocks, runs the inverse 8x8 transform,
 * optional overlap smoothing (pq >= 9 && overlap) and the in-loop filter.
 * NOTE(review): this listing is a doxygen extract — the function signature
 * and the hyperlinked statements (e.g. the v->codingset assignments inside
 * the switches, and the CBP VLC read before ac_pred) are missing here. */
 4772 {
 4773  int k, j;
 4774  MpegEncContext *s = &v->s;
 4775  int cbp, val;
 4776  uint8_t *coded_val;
 4777  int mb_pos;
 4778 
 4779  /* select codingmode used for VLC tables selection */
 4780  switch (v->y_ac_table_index) {
 4781  case 0:
 4783  break;
 4784  case 1:
 4786  break;
 4787  case 2:
 4789  break;
 4790  }
 4791 
 4792  switch (v->c_ac_table_index) {
 4793  case 0:
 4795  break;
 4796  case 1:
 4798  break;
 4799  case 2:
 4801  break;
 4802  }
 4803 
 4804  /* Set DC scale - y and c use the same */
 4805  s->y_dc_scale = s->y_dc_scale_table[v->pq];
 4806  s->c_dc_scale = s->c_dc_scale_table[v->pq];
 4807 
 4808  //do frame decode
 4809  s->mb_x = s->mb_y = 0;
 4810  s->mb_intra = 1;
 4811  s->first_slice_line = 1;
 4812  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
 4813  s->mb_x = 0;
 4814  init_block_index(v);
 4815  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
/* dst[0..3]: the four 8x8 luma quadrants, dst[4..5]: the chroma planes */
 4816  uint8_t *dst[6];
 4818  dst[0] = s->dest[0];
 4819  dst[1] = dst[0] + 8;
 4820  dst[2] = s->dest[0] + s->linesize * 8;
 4821  dst[3] = dst[2] + 8;
 4822  dst[4] = s->dest[1];
 4823  dst[5] = s->dest[2];
 4824  s->bdsp.clear_blocks(s->block[0]);
 4825  mb_pos = s->mb_x + s->mb_y * s->mb_width;
 4826  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
 4827  s->current_picture.qscale_table[mb_pos] = v->pq;
 4828  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
 4829  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 4830 
 4831  // do actual MB decoding and displaying
 4833  v->s.ac_pred = get_bits1(&v->s.gb);
 4834 
/* cbp bit (5 - k) flags whether block k is coded; for luma (k < 4) the
 * raw bit is XORed with a spatial prediction and the result stored back */
 4835  for (k = 0; k < 6; k++) {
 4836  val = ((cbp >> (5 - k)) & 1);
 4837 
 4838  if (k < 4) {
 4839  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
 4840  val = val ^ pred;
 4841  *coded_val = val;
 4842  }
 4843  cbp |= val << (5 - k);
 4844 
 4845  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
 4846 
 4847  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
 4848  continue;
 4849  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* with overlap smoothing pending, emit signed residue; otherwise bias by
 * -64 (range reduction doubles amplitudes when rangeredfrm is set) */
 4850  if (v->pq >= 9 && v->overlap) {
 4851  if (v->rangeredfrm)
 4852  for (j = 0; j < 64; j++)
 4853  s->block[k][j] <<= 1;
 4854  s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
 4855  k & 4 ? s->uvlinesize
 4856  : s->linesize);
 4857  } else {
 4858  if (v->rangeredfrm)
 4859  for (j = 0; j < 64; j++)
 4860  s->block[k][j] = (s->block[k][j] - 64) << 1;
 4861  s->idsp.put_pixels_clamped(s->block[k], dst[k],
 4862  k & 4 ? s->uvlinesize
 4863  : s->linesize);
 4864  }
 4865  }
 4866 
/* overlap smoothing across the left/top MB edges and the internal 8x8 edge */
 4867  if (v->pq >= 9 && v->overlap) {
 4868  if (s->mb_x) {
 4869  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
 4870  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
 4871  if (!(s->flags & CODEC_FLAG_GRAY)) {
 4872  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
 4873  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
 4874  }
 4875  }
 4876  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
 4877  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
 4878  if (!s->first_slice_line) {
 4879  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
 4880  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
 4881  if (!(s->flags & CODEC_FLAG_GRAY)) {
 4882  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
 4883  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
 4884  }
 4885  }
 4886  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
 4887  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
 4888  }
 4889  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
 4890 
/* abort the frame if we read past the declared bit budget */
 4891  if (get_bits_count(&s->gb) > v->bits) {
 4892  ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
 4893  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
 4894  get_bits_count(&s->gb), v->bits);
 4895  return;
 4896  }
 4897  }
 4898  if (!v->s.loop_filter)
 4899  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
 4900  else if (s->mb_y)
 4901  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
 4902 
 4903  s->first_slice_line = 0;
 4904  }
 4905  if (v->s.loop_filter)
 4906  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
 4907 
 4908  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
 4909  * profile, these only differ when decoding MSS2 rectangles. */
 4910  ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
 4911 }
4912 
/* Decode all intra macroblocks of an I-frame, advanced profile.
 * Unlike the simple-profile path, per-MB quantizer (GET_MQUANT),
 * fieldtx/acpred/overflags bitplanes and slice-aware start rows are
 * handled, and reconstruction is deferred through v->block ring slots.
 * NOTE(review): doxygen extract — the signature and several hyperlinked
 * statements (codingset assignments, CBP VLC read, the smooth/overlap and
 * bottom-row reconstruction calls) are missing from this listing. */
 4916 {
 4917  int k;
 4918  MpegEncContext *s = &v->s;
 4919  int cbp, val;
 4920  uint8_t *coded_val;
 4921  int mb_pos;
/* mquant starts at picture quantizer; GET_MQUANT() may override per MB */
 4922  int mquant = v->pq;
 4923  int mqdiff;
 4924  GetBitContext *gb = &s->gb;
 4925 
 4926  /* select codingmode used for VLC tables selection */
 4927  switch (v->y_ac_table_index) {
 4928  case 0:
 4930  break;
 4931  case 1:
 4933  break;
 4934  case 2:
 4936  break;
 4937  }
 4938 
 4939  switch (v->c_ac_table_index) {
 4940  case 0:
 4942  break;
 4943  case 1:
 4945  break;
 4946  case 2:
 4948  break;
 4949  }
 4950 
 4951  // do frame decode
 4952  s->mb_x = s->mb_y = 0;
 4953  s->mb_intra = 1;
 4954  s->first_slice_line = 1;
 4955  s->mb_y = s->start_mb_y;
/* for a non-first slice, clear the coded-block predictors of the row above
 * so prediction does not leak across the slice boundary */
 4956  if (s->start_mb_y) {
 4957  s->mb_x = 0;
 4958  init_block_index(v);
 4959  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
 4960  (1 + s->b8_stride) * sizeof(*s->coded_block));
 4961  }
 4962  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
 4963  s->mb_x = 0;
 4964  init_block_index(v);
 4965  for (;s->mb_x < s->mb_width; s->mb_x++) {
 4966  int16_t (*block)[64] = v->block[v->cur_blk_idx];
 4968  s->bdsp.clear_blocks(block[0]);
 4969  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 4970  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
 4971  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
 4972  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
 4973 
 4974  // do actual MB decoding and displaying
/* bitplane-coded flags may be raw (read per MB) or decoded up front */
 4975  if (v->fieldtx_is_raw)
 4976  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
 4978  if ( v->acpred_is_raw)
 4979  v->s.ac_pred = get_bits1(&v->s.gb);
 4980  else
 4981  v->s.ac_pred = v->acpred_plane[mb_pos];
 4982 
 4983  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
 4984  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
 4985 
 4986  GET_MQUANT();
 4987 
 4988  s->current_picture.qscale_table[mb_pos] = mquant;
 4989  /* Set DC scale - y and c use the same */
 4990  s->y_dc_scale = s->y_dc_scale_table[mquant];
 4991  s->c_dc_scale = s->c_dc_scale_table[mquant];
 4992 
 4993  for (k = 0; k < 6; k++) {
 4994  val = ((cbp >> (5 - k)) & 1);
 4995 
 4996  if (k < 4) {
 4997  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
 4998  val = val ^ pred;
 4999  *coded_val = val;
 5000  }
 5001  cbp |= val << (5 - k);
 5002 
/* a_avail/c_avail: is an above/left neighbour usable for AC prediction?
 * inner sub-blocks (k==2/3 below, k==1/3 right) always have one */
 5003  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
 5004  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
 5005 
 5006  vc1_decode_i_block_adv(v, block[k], k, val,
 5007  (k < 4) ? v->codingset : v->codingset2, mquant);
 5008 
 5009  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
 5010  continue;
 5012  }
 5013 
 5017 
 5018  if (get_bits_count(&s->gb) > v->bits) {
 5019  // TODO: may need modification to handle slice coding
 5020  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
 5021  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
 5022  get_bits_count(&s->gb), v->bits);
 5023  return;
 5024  }
 5025  }
 5026  if (!v->s.loop_filter)
 5027  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
 5028  else if (s->mb_y)
 5029  ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
 5030  s->first_slice_line = 0;
 5031  }
 5032 
/* flush the last, still-buffered row of reconstructed macroblocks */
 5033  /* raw bottom MB row */
 5034  s->mb_x = 0;
 5035  init_block_index(v);
 5036 
 5037  for (;s->mb_x < s->mb_width; s->mb_x++) {
 5040  if (v->s.loop_filter)
 5042  }
 5043  if (v->s.loop_filter)
 5044  ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
 5045  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
 5046  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 5047 }
5048 
/* Decode all macroblocks of a P-frame, dispatching per-MB decoding by
 * frame coding mode (field / frame interlaced / progressive) and applying
 * the loop filter one row behind decode when enabled.
 * NOTE(review): doxygen extract — the signature, the codingset assignments
 * and the per-MB dispatch call lines are missing from this listing.
 * NOTE(review): both switches below test v->c_ac_table_index; the intra
 * decoders use y_ac_table_index for the first switch — confirm against
 * upstream whether this asymmetry is intended. */
 5050 {
 5051  MpegEncContext *s = &v->s;
 5052  int apply_loop_filter;
 5053 
 5054  /* select codingmode used for VLC tables selection */
 5055  switch (v->c_ac_table_index) {
 5056  case 0:
 5058  break;
 5059  case 1:
 5061  break;
 5062  case 2:
 5064  break;
 5065  }
 5066 
 5067  switch (v->c_ac_table_index) {
 5068  case 0:
 5070  break;
 5071  case 1:
 5073  break;
 5074  case 2:
 5076  break;
 5077  }
 5078 
/* in-loop deblocking only for progressive frames and when not skipped */
 5079  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
 5080  v->fcm == PROGRESSIVE;
 5081  s->first_slice_line = 1;
 5082  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
 5083  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
 5084  s->mb_x = 0;
 5085  init_block_index(v);
 5086  for (; s->mb_x < s->mb_width; s->mb_x++) {
 5088 
 5089  if (v->fcm == ILACE_FIELD)
 5091  else if (v->fcm == ILACE_FRAME)
 5093  else vc1_decode_p_mb(v);
 5094  if (s->mb_y != s->start_mb_y && apply_loop_filter)
 5096  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
 5097  // TODO: may need modification to handle slice coding
 5098  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
 5099  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
 5100  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
 5101  return;
 5102  }
 5103  }
/* roll the per-row side-info lines: current row becomes "previous row" */
 5104  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
 5105  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
 5106  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
 5107  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
 5108  if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
 5109  s->first_slice_line = 0;
 5110  }
/* deblock the last decoded row, which the in-loop pass above skipped */
 5111  if (apply_loop_filter) {
 5112  s->mb_x = 0;
 5113  init_block_index(v);
 5114  for (; s->mb_x < s->mb_width; s->mb_x++) {
 5117  }
 5118  }
 5119  if (s->end_mb_y >= s->start_mb_y)
 5120  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
 5121  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
 5122  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 5123 }
5124 
/* Decode all macroblocks of a B-frame, dispatching per-MB decoding by
 * frame coding mode (field / frame interlaced / progressive).
 * NOTE(review): doxygen extract — the signature, codingset assignments and
 * the interlaced dispatch call lines are missing from this listing.
 * NOTE(review): both switches test v->c_ac_table_index (same asymmetry as
 * vc1_decode_p_blocks) — confirm whether the first should use
 * y_ac_table_index as the intra decoders do. */
 5126 {
 5127  MpegEncContext *s = &v->s;
 5128 
 5129  /* select codingmode used for VLC tables selection */
 5130  switch (v->c_ac_table_index) {
 5131  case 0:
 5133  break;
 5134  case 1:
 5136  break;
 5137  case 2:
 5139  break;
 5140  }
 5141 
 5142  switch (v->c_ac_table_index) {
 5143  case 0:
 5145  break;
 5146  case 1:
 5148  break;
 5149  case 2:
 5151  break;
 5152  }
 5153 
 5154  s->first_slice_line = 1;
 5155  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
 5156  s->mb_x = 0;
 5157  init_block_index(v);
 5158  for (; s->mb_x < s->mb_width; s->mb_x++) {
 5160 
 5161  if (v->fcm == ILACE_FIELD)
 5163  else if (v->fcm == ILACE_FRAME)
 5165  else
 5166  vc1_decode_b_mb(v);
/* negative bit count signals a bitstream reader overflow */
 5167  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
 5168  // TODO: may need modification to handle slice coding
 5169  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
 5170  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
 5171  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
 5172  return;
 5173  }
 5174  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
 5175  }
 5176  if (!v->s.loop_filter)
 5177  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
 5178  else if (s->mb_y)
 5179  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
 5180  s->first_slice_line = 0;
 5181  }
 5182  if (v->s.loop_filter)
 5183  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
 5184  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
 5185  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 5186 }
5187 
/* Handle a skipped P-frame: copy the previous picture row by row into the
 * current destination planes and emit each band immediately.  Requires a
 * valid reference frame; silently returns if none is available.
 * NOTE(review): doxygen extract — the signature and the per-row
 * block-index update call (line 5200) are missing from this listing. */
 5189 {
 5190  MpegEncContext *s = &v->s;
 5191 
/* nothing to copy from without a decoded reference picture */
 5192  if (!v->s.last_picture.f->data[0])
 5193  return;
 5194 
 5195  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
 5196  s->first_slice_line = 1;
 5197  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
 5198  s->mb_x = 0;
 5199  init_block_index(v);
/* one 16-pixel luma row and the matching 8-pixel chroma rows per MB row */
 5201  memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
 5202  memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
 5203  memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
 5204  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
 5205  s->first_slice_line = 0;
 5206  }
 5208 }
5209 
/* Top-level dispatcher for decoding one picture's macroblock layer.
 * X8-coded intra frames go to the IntraX8 decoder; otherwise the block
 * reconstruction ring indices are reset and control is routed by picture
 * type (I / P / B, with skipped-P and BI special cases).
 * NOTE(review): doxygen extract — the signature and the per-case call
 * lines (the vc1_decode_*_blocks targets) are missing from this listing. */
 5211 {
 5212 
 5213  v->s.esc3_level_length = 0;
 5214  if (v->x8_type) {
 5215  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
 5216  } else {
/* reset the 4-slot deferred-reconstruction ring (cur/left/topleft/top) */
 5217  v->cur_blk_idx = 0;
 5218  v->left_blk_idx = -1;
 5219  v->topleft_blk_idx = 1;
 5220  v->top_blk_idx = 2;
 5221  switch (v->s.pict_type) {
 5222  case AV_PICTURE_TYPE_I:
 5223  if (v->profile == PROFILE_ADVANCED)
 5225  else
 5227  break;
 5228  case AV_PICTURE_TYPE_P:
 5229  if (v->p_frame_skipped)
 5231  else
 5233  break;
 5234  case AV_PICTURE_TYPE_B:
/* BI frames are coded like I frames, so reuse the intra paths */
 5235  if (v->bi_type) {
 5236  if (v->profile == PROFILE_ADVANCED)
 5238  else
 5240  } else
 5242  break;
 5243  }
 5244  }
 5245 }
5246 
5247 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5248 
 5249 typedef struct {
/* Per-sprite transform coefficients in 16.16 fixed point, as read by
 * vc1_sprite_parse_transform() and consumed by vc1_draw_sprites():
 *   [0] x scale, [1] rotation (unused), [2] x offset,
 *   [3] rotation (unused), [4] y scale, [5] y offset, [6] alpha. */
 5261  int coefs[2][7];
 5262 
/* optional effect block from the bitstream: type id, flag bit, and two
 * parameter lists with their element counts */
 5263  int effect_type, effect_flag;
 5264  int effect_pcount1, effect_pcount2;
 5265  int effect_params1[15], effect_params2[10];
 5266 } SpriteData;
5267 
5268 static inline int get_fp_val(GetBitContext* gb)
5269 {
5270  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5271 }
5272 
5273 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5274 {
5275  c[1] = c[3] = 0;
5276 
5277  switch (get_bits(gb, 2)) {
5278  case 0:
5279  c[0] = 1 << 16;
5280  c[2] = get_fp_val(gb);
5281  c[4] = 1 << 16;
5282  break;
5283  case 1:
5284  c[0] = c[4] = get_fp_val(gb);
5285  c[2] = get_fp_val(gb);
5286  break;
5287  case 2:
5288  c[0] = get_fp_val(gb);
5289  c[2] = get_fp_val(gb);
5290  c[4] = get_fp_val(gb);
5291  break;
5292  case 3:
5293  c[0] = get_fp_val(gb);
5294  c[1] = get_fp_val(gb);
5295  c[2] = get_fp_val(gb);
5296  c[3] = get_fp_val(gb);
5297  c[4] = get_fp_val(gb);
5298  break;
5299  }
5300  c[5] = get_fp_val(gb);
5301  if (get_bits1(gb))
5302  c[6] = get_fp_val(gb);
5303  else
5304  c[6] = 1 << 16;
5305 }
5306 
5307 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5308 {
5309  AVCodecContext *avctx = v->s.avctx;
5310  int sprite, i;
5311 
5312  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5313  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
5314  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5315  avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5316  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5317  for (i = 0; i < 7; i++)
5318  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5319  sd->coefs[sprite][i] / (1<<16),
5320  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5321  av_log(avctx, AV_LOG_DEBUG, "\n");
5322  }
5323 
5324  skip_bits(gb, 2);
5325  if (sd->effect_type = get_bits_long(gb, 30)) {
5326  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5327  case 7:
5328  vc1_sprite_parse_transform(gb, sd->effect_params1);
5329  break;
5330  case 14:
5331  vc1_sprite_parse_transform(gb, sd->effect_params1);
5332  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5333  break;
5334  default:
5335  for (i = 0; i < sd->effect_pcount1; i++)
5336  sd->effect_params1[i] = get_fp_val(gb);
5337  }
5338  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5339  // effect 13 is simple alpha blending and matches the opacity above
5340  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5341  for (i = 0; i < sd->effect_pcount1; i++)
5342  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5343  sd->effect_params1[i] / (1 << 16),
5344  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5345  av_log(avctx, AV_LOG_DEBUG, "\n");
5346  }
5347 
5348  sd->effect_pcount2 = get_bits(gb, 16);
5349  if (sd->effect_pcount2 > 10) {
5350  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5351  return;
5352  } else if (sd->effect_pcount2) {
5353  i = -1;
5354  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5355  while (++i < sd->effect_pcount2) {
5356  sd->effect_params2[i] = get_fp_val(gb);
5357  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5358  sd->effect_params2[i] / (1 << 16),
5359  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5360  }
5361  av_log(avctx, AV_LOG_DEBUG, "\n");
5362  }
5363  }
5364  if (sd->effect_flag = get_bits1(gb))
5365  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5366 
5367  if (get_bits_count(gb) >= gb->size_in_bits +
5368  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
5369  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5370  if (get_bits_count(gb) < gb->size_in_bits - 8)
5371  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5372 }
5373 
/* Composite one or two sprites into v->sprite_output_frame using the
 * 16.16 fixed-point transforms parsed into sd->coefs.  For each output
 * row a horizontally scaled source row is produced (cached in
 * v->sr_rows, keyed by source line in sr_cache) and then vertically
 * interpolated and/or alpha-blended by the vc1dsp sprite kernels.
 * Left intact: the row-caching and blend-case ordering is exact and
 * order-dependent, so this review only adds documentation. */
 5374 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
 5375 {
 5376  int i, plane, row, sprite;
/* sr_cache[sprite][0/1]: source line numbers currently held in
 * v->sr_rows[sprite][0/1]; -1 means "nothing cached" */
 5377  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
 5378  uint8_t* src_h[2][2];
 5379  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
 5380  int ysub[2];
 5381  MpegEncContext *s = &v->s;
 5382 
/* clamp offsets/advances into the sprite; note `v->sprite_width-1 << 16`
 * parses as `(v->sprite_width - 1) << 16` (minus binds tighter than shift) */
 5383  for (i = 0; i < 2; i++) {
 5384  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
 5385  xadv[i] = sd->coefs[i][0];
 5386  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
 5387  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
 5388 
 5389  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
 5390  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
 5391  }
 5392  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
 5393 
 5394  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
 5395  int width = v->output_width>>!!plane;
 5396 
 5397  for (row = 0; row < v->output_height>>!!plane; row++) {
 5398  uint8_t *dst = v->sprite_output_frame->data[plane] +
 5399  v->sprite_output_frame->linesize[plane] * row;
 5400 
/* sprite 0 reads from the current picture, sprite 1 from the previous */
 5401  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
 5402  uint8_t *iplane = s->current_picture.f->data[plane];
 5403  int iline = s->current_picture.f->linesize[plane];
 5404  int ycoord = yoff[sprite] + yadv[sprite] * row;
 5405  int yline = ycoord >> 16;
 5406  int next_line;
/* ysub: 16-bit vertical interpolation fraction between yline and yline+1 */
 5407  ysub[sprite] = ycoord & 0xFFFF;
 5408  if (sprite) {
 5409  iplane = s->last_picture.f->data[plane];
 5410  iline = s->last_picture.f->linesize[plane];
 5411  }
 5412  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
/* fast path: integral x offset and unity x scale need no resampling */
 5413  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
 5414  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
 5415  if (ysub[sprite])
 5416  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
 5417  } else {
 5418  if (sr_cache[sprite][0] != yline) {
/* reuse the second cached row by swapping instead of re-filtering */
 5419  if (sr_cache[sprite][1] == yline) {
 5420  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
 5421  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
 5422  } else {
 5423  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
 5424  sr_cache[sprite][0] = yline;
 5425  }
 5426  }
 5427  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
 5428  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
 5429  iplane + next_line, xoff[sprite],
 5430  xadv[sprite], width);
 5431  sr_cache[sprite][1] = yline + 1;
 5432  }
 5433  src_h[sprite][0] = v->sr_rows[sprite][0];
 5434  src_h[sprite][1] = v->sr_rows[sprite][1];
 5435  }
 5436  }
 5437 
/* vertical interpolation / two-sprite alpha blend, cheapest case first */
 5438  if (!v->two_sprites) {
 5439  if (ysub[0]) {
 5440  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
 5441  } else {
 5442  memcpy(dst, src_h[0][0], width);
 5443  }
 5444  } else {
 5445  if (ysub[0] && ysub[1]) {
 5446  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
 5447  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
 5448  } else if (ysub[0]) {
 5449  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
 5450  src_h[1][0], alpha, width);
 5451  } else if (ysub[1]) {
 5452  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
 5453  src_h[0][0], (1<<16)-1-alpha, width);
 5454  } else {
 5455  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
 5456  }
 5457  }
 5458  }
 5459 
/* after luma, halve the offsets once for the two half-resolution planes */
 5460  if (!plane) {
 5461  for (i = 0; i < 2; i++) {
 5462  xoff[i] >>= 1;
 5463  yoff[i] >>= 1;
 5464  }
 5465  }
 5466 
 5467  }
 5468 }
5469 
5470 
/* Parse the sprite header, validate that the required reference
 * picture(s) exist, obtain an output buffer and composite the sprites
 * into it.  Returns 0 on success, -1 on error.
 * NOTE(review): doxygen extract — the line before ff_get_buffer()
 * (original line 5489, presumably releasing the previous
 * sprite_output_frame) is missing from this listing; confirm upstream. */
 5471 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 5472 {
 5473  MpegEncContext *s = &v->s;
 5474  AVCodecContext *avctx = s->avctx;
 5475  SpriteData sd;
 5476 
 5477  vc1_parse_sprites(v, gb, &sd);
 5478 
 5479  if (!s->current_picture.f || !s->current_picture.f->data[0]) {
 5480  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
 5481  return -1;
 5482  }
 5483 
/* degrade gracefully: fall back to single-sprite mode if the second
 * (previous-picture) sprite has no data */
 5484  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
 5485  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
 5486  v->two_sprites = 0;
 5487  }
 5488 
 5490  if (ff_get_buffer(avctx, v->sprite_output_frame, 0) < 0) {
 5491  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
 5492  return -1;
 5493  }
 5494 
 5495  vc1_draw_sprites(v, &sd);
 5496 
 5497  return 0;
 5498 }
5499 
5500 static void vc1_sprite_flush(AVCodecContext *avctx)
5501 {
5502  VC1Context *v = avctx->priv_data;
5503  MpegEncContext *s = &v->s;
5504  AVFrame *f = s->current_picture.f;
5505  int plane, i;
5506 
5507  /* Windows Media Image codecs have a convergence interval of two keyframes.
5508  Since we can't enforce it, clear to black the missing sprite. This is
5509  wrong but it looks better than doing nothing. */
5510 
5511  if (f && f->data[0])
5512  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5513  for (i = 0; i < v->sprite_height>>!!plane; i++)
5514  memset(f->data[plane] + i * f->linesize[plane],
5515  plane ? 128 : 0, f->linesize[plane]);
5516 }
5517 
5518 #endif
5519 
/* Allocate all per-picture side-info tables used by the VC-1 decoder:
 * MB bitplanes, the deferred-reconstruction block ring, two-row CBP /
 * transform-type / intra / luma-MV lines, block-type and block-MV maps.
 * Returns 0 or AVERROR(ENOMEM) after freeing what was allocated.
 * NOTE(review): doxygen extract — the function signature and a few lines
 * (e.g. the sprite-decoder condition at 5571 and part of the error-path
 * av_freep calls at 5579-5582) are missing from this listing. */
 5521 {
 5522  MpegEncContext *s = &v->s;
 5523  int i;
/* round up so interlaced field pictures always have an even MB height */
 5524  int mb_height = FFALIGN(s->mb_height, 2);
 5525 
 5526  /* Allocate mb bitplanes */
 5527  v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
 5528  v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
 5529  v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
 5530  v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
 5531  v->acpred_plane = av_malloc (s->mb_stride * mb_height);
 5532  v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
 5533 
/* block ring: one slot per MB column plus two for the delayed neighbours */
 5534  v->n_allocated_blks = s->mb_width + 2;
 5535  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* the *_base tables hold two rows; the working pointer is row 1 so row 0
 * can serve as the "previous row" during decoding */
 5536  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
 5537  v->cbp = v->cbp_base + s->mb_stride;
 5538  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
 5539  v->ttblk = v->ttblk_base + s->mb_stride;
 5540  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
 5541  v->is_intra = v->is_intra_base + s->mb_stride;
 5542  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
 5543  v->luma_mv = v->luma_mv_base + s->mb_stride;
 5544 
 5545  /* allocate block type info in that way so it could be used with s->block_index[] */
 5546  v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
 5547  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
 5548  v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
 5549  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
 5550 
 5551  /* allocate memory to store block level MV info */
 5552  v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
 5553  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
 5554  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
 5555  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
 5556  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
 5557  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
 5558  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
 5559  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
 5560 
 5561  /* Init coded blocks info */
 5562  if (v->profile == PROFILE_ADVANCED) {
 5563 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
 5564 // return -1;
 5565 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
 5566 // return -1;
 5567  }
 5568 
 5569  ff_intrax8_common_init(&v->x8,s);
 5570 
/* per-sprite horizontally-resampled row buffers (sprite decoders only) */
 5572  for (i = 0; i < 4; i++)
 5573  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
 5574  }
 5575 
/* single combined NULL check; free everything on any allocation failure */
 5576  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
 5577  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
 5578  !v->mb_type_base) {
 5581  av_freep(&v->acpred_plane);
 5583  av_freep(&v->block);
 5584  av_freep(&v->cbp_base);
 5585  av_freep(&v->ttblk_base);
 5586  av_freep(&v->is_intra_base);
 5587  av_freep(&v->luma_mv_base);
 5588  av_freep(&v->mb_type_base);
 5589  return AVERROR(ENOMEM);
 5590  }
 5591 
 5592  return 0;
 5593 }
5594 
/* Build transposed copies of the WMV1 zigzag scantables and set the
 * block-shift values for column-major (transposed) scanning.
 * NOTE(review): doxygen extract — the signature and one loop statement
 * (original line 5604) are missing from this listing. */
 5596 {
 5597  int i;
 5598  for (i = 0; i < 64; i++) {
/* swap the 3-bit row and column fields of a 6-bit scan position */
 5599 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
 5600  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
 5601  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
 5602  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
 5603  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
 5605  }
 5606  v->left_blk_sh = 0;
 5607  v->top_blk_sh = 3;
 5608 }
5609 
5615 {
5616  VC1Context *v = avctx->priv_data;
5617  MpegEncContext *s = &v->s;
5618  GetBitContext gb;
5619 
5620  /* save the container output size for WMImage */
5621  v->output_width = avctx->width;
5622  v->output_height = avctx->height;
5623 
5624  if (!avctx->extradata_size || !avctx->extradata)
5625  return -1;
5626  if (!(avctx->flags & CODEC_FLAG_GRAY))
5627  avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
5628  else
5629  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5630  v->s.avctx = avctx;
5631 
5632  if (ff_vc1_init_common(v) < 0)
5633  return -1;
5634  ff_blockdsp_init(&s->bdsp, avctx);
5636  ff_qpeldsp_init(&s->qdsp);
5637 
5638  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5639  int count = 0;
5640 
5641  // looks like WMV3 has a sequence header stored in the extradata
5642  // advanced sequence header may be before the first frame
5643  // the last byte of the extradata is a version number, 1 for the
5644  // samples we can decode
5645 
5646  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5647 
5648  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5649  return -1;
5650 
5651  count = avctx->extradata_size*8 - get_bits_count(&gb);
5652  if (count > 0) {
5653  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5654  count, get_bits(&gb, count));
5655  } else if (count < 0) {
5656  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5657  }
5658  } else { // VC1/WVC1/WVP2
5659  const uint8_t *start = avctx->extradata;
5660  uint8_t *end = avctx->extradata + avctx->extradata_size;
5661  const uint8_t *next;
5662  int size, buf2_size;
5663  uint8_t *buf2 = NULL;
5664  int seq_initialized = 0, ep_initialized = 0;
5665 
5666  if (avctx->extradata_size < 16) {
5667  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5668  return -1;
5669  }
5670 
5672  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5673  next = start;
5674  for (; next < end; start = next) {
5675  next = find_next_marker(start + 4, end);
5676  size = next - start - 4;
5677  if (size <= 0)
5678  continue;
5679  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5680  init_get_bits(&gb, buf2, buf2_size * 8);
5681  switch (AV_RB32(start)) {
5682  case VC1_CODE_SEQHDR:
5683  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5684  av_free(buf2);
5685  return -1;
5686  }
5687  seq_initialized = 1;
5688  break;
5689  case VC1_CODE_ENTRYPOINT:
5690  if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5691  av_free(buf2);
5692  return -1;
5693  }
5694  ep_initialized = 1;
5695  break;
5696  }
5697  }
5698  av_free(buf2);
5699  if (!seq_initialized || !ep_initialized) {
5700  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5701  return -1;
5702  }
5703  v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5704  }
5705 
5707  if (!v->sprite_output_frame)
5708  return AVERROR(ENOMEM);
5709 
5710  avctx->profile = v->profile;
5711  if (v->profile == PROFILE_ADVANCED)
5712  avctx->level = v->level;
5713 
5714  avctx->has_b_frames = !!avctx->max_b_frames;
5715 
5716  s->mb_width = (avctx->coded_width + 15) >> 4;
5717  s->mb_height = (avctx->coded_height + 15) >> 4;
5718 
5719  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5721  } else {
5722  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5723  v->left_blk_sh = 3;
5724  v->top_blk_sh = 0;
5725  }
5726 
5727  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5728  v->sprite_width = avctx->coded_width;
5729  v->sprite_height = avctx->coded_height;
5730 
5731  avctx->coded_width = avctx->width = v->output_width;
5732  avctx->coded_height = avctx->height = v->output_height;
5733 
5734  // prevent 16.16 overflows
5735  if (v->sprite_width > 1 << 14 ||
5736  v->sprite_height > 1 << 14 ||
5737  v->output_width > 1 << 14 ||
5738  v->output_height > 1 << 14) return -1;
5739  }
5740  return 0;
5741 }
5742 
/* Decoder close: release sprite row buffers, HRD tables, the shared
 * mpegvideo context and every side-info table allocated in the
 * alloc-tables init.  Always returns 0.
 * NOTE(review): doxygen extract — the signature and several hyperlinked
 * av_freep calls (e.g. original lines 5751, 5758-5760, 5763, 5765,
 * 5773) are missing from this listing. */
 5747 {
 5748  VC1Context *v = avctx->priv_data;
 5749  int i;
 5750 
 5752 
/* the four horizontally-resampled sprite row buffers */
 5753  for (i = 0; i < 4; i++)
 5754  av_freep(&v->sr_rows[i >> 1][i & 1]);
 5755  av_freep(&v->hrd_rate);
 5756  av_freep(&v->hrd_buffer);
 5757  ff_mpv_common_end(&v->s);
 5761  av_freep(&v->fieldtx_plane);
 5762  av_freep(&v->acpred_plane);
 5764  av_freep(&v->mb_type_base);
 5766  av_freep(&v->mv_f_base);
 5767  av_freep(&v->mv_f_next_base);
 5768  av_freep(&v->block);
 5769  av_freep(&v->cbp_base);
 5770  av_freep(&v->ttblk_base);
 5771  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
 5772  av_freep(&v->luma_mv_base);
 5774  return 0;
 5775 }
5776 
5777 
5781 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5782  int *got_frame, AVPacket *avpkt)
5783 {
5784  const uint8_t *buf = avpkt->data;
5785  int buf_size = avpkt->size, n_slices = 0, i, ret;
5786  VC1Context *v = avctx->priv_data;
5787  MpegEncContext *s = &v->s;
5788  AVFrame *pict = data;
5789  uint8_t *buf2 = NULL;
5790  const uint8_t *buf_start = buf;
5791  int mb_height, n_slices1;
5792  struct {
5793  uint8_t *buf;
5794  GetBitContext gb;
5795  int mby_start;
5796  } *slices = NULL, *tmp;
5797 
5798  /* no supplementary picture */
5799  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5800  /* special case for last picture */
5801  if (s->low_delay == 0 && s->next_picture_ptr) {
5802  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
5803  return ret;
5804  s->next_picture_ptr = NULL;
5805 
5806  *got_frame = 1;
5807  }
5808 
5809  return 0;
5810  }
5811 
5812  //for advanced profile we may need to parse and unescape data
5813  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5814  int buf_size2 = 0;
5815  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5816 
5817  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5818  const uint8_t *start, *end, *next;
5819  int size;
5820 
5821  next = buf;
5822  for (start = buf, end = buf + buf_size; next < end; start = next) {
5823  next = find_next_marker(start + 4, end);
5824  size = next - start - 4;
5825  if (size <= 0) continue;
5826  switch (AV_RB32(start)) {
5827  case VC1_CODE_FRAME:
5828  if (avctx->hwaccel)
5829  buf_start = start;
5830  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5831  break;
5832  case VC1_CODE_FIELD: {
5833  int buf_size3;
5834  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5835  if (!tmp)
5836  goto err;
5837  slices = tmp;
5838  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5839  if (!slices[n_slices].buf)
5840  goto err;
5841  buf_size3 = vc1_unescape_buffer(start + 4, size,
5842  slices[n_slices].buf);
5843  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5844  buf_size3 << 3);
5845  /* assuming that the field marker is at the exact middle,
5846  hope it's correct */
5847  slices[n_slices].mby_start = s->mb_height >> 1;
5848  n_slices1 = n_slices - 1; // index of the last slice of the first field
5849  n_slices++;
5850  break;
5851  }
5852  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5853  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5854  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5855  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5856  break;
5857  case VC1_CODE_SLICE: {
5858  int buf_size3;
5859  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5860  if (!tmp)
5861  goto err;
5862  slices = tmp;
5863  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5864  if (!slices[n_slices].buf)
5865  goto err;
5866  buf_size3 = vc1_unescape_buffer(start + 4, size,
5867  slices[n_slices].buf);
5868  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5869  buf_size3 << 3);
5870  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5871  n_slices++;
5872  break;
5873  }
5874  }
5875  }
5876  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5877  const uint8_t *divider;
5878  int buf_size3;
5879 
5880  divider = find_next_marker(buf, buf + buf_size);
5881  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5882  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5883  goto err;
5884  } else { // found field marker, unescape second field
5885  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5886  if (!tmp)
5887  goto err;
5888  slices = tmp;
5889  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5890  if (!slices[n_slices].buf)
5891  goto err;
5892  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5893  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5894  buf_size3 << 3);
5895  slices[n_slices].mby_start = s->mb_height >> 1;
5896  n_slices1 = n_slices - 1;
5897  n_slices++;
5898  }
5899  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5900  } else {
5901  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5902  }
5903  init_get_bits(&s->gb, buf2, buf_size2*8);
5904  } else
5905  init_get_bits(&s->gb, buf, buf_size*8);
5906 
5907  if (v->res_sprite) {
5908  v->new_sprite = !get_bits1(&s->gb);
5909  v->two_sprites = get_bits1(&s->gb);
5910  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5911  we're using the sprite compositor. These are intentionally kept separate
5912  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5913  the vc1 one for WVP2 */
5914  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5915  if (v->new_sprite) {
5916  // switch AVCodecContext parameters to those of the sprites
5917  avctx->width = avctx->coded_width = v->sprite_width;
5918  avctx->height = avctx->coded_height = v->sprite_height;
5919  } else {
5920  goto image;
5921  }
5922  }
5923  }
5924 
5925  if (s->context_initialized &&
5926  (s->width != avctx->coded_width ||
5927  s->height != avctx->coded_height)) {
5928  ff_vc1_decode_end(avctx);
5929  }
5930 
5931  if (!s->context_initialized) {
5932  if (ff_msmpeg4_decode_init(avctx) < 0)
5933  goto err;
5934  if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5935  ff_mpv_common_end(s);
5936  goto err;
5937  }
5938 
5939  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5940 
5941  if (v->profile == PROFILE_ADVANCED) {
5942  s->h_edge_pos = avctx->coded_width;
5943  s->v_edge_pos = avctx->coded_height;
5944  }
5945  }
5946 
5947  // do parse frame header
5948  v->pic_header_flag = 0;
5949  v->first_pic_header_flag = 1;
5950  if (v->profile < PROFILE_ADVANCED) {
5951  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5952  goto err;
5953  }
5954  } else {
5955  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5956  goto err;
5957  }
5958  }
5959  v->first_pic_header_flag = 0;
5960 
5961  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5962  && s->pict_type != AV_PICTURE_TYPE_I) {
5963  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5964  goto err;
5965  }
5966 
5967  // for skipping the frame
5970 
5971  /* skip B-frames if we don't have reference frames */
5972  if (!s->last_picture_ptr && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5973  goto end;
5974  }
5975  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5976  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5977  avctx->skip_frame >= AVDISCARD_ALL) {
5978  goto end;
5979  }
5980 
5981  if (s->next_p_frame_damaged) {
5982  if (s->pict_type == AV_PICTURE_TYPE_B)
5983  goto end;
5984  else
5985  s->next_p_frame_damaged = 0;
5986  }
5987 
5988  if (ff_mpv_frame_start(s, avctx) < 0) {
5989  goto err;
5990  }
5991 
5992  // process pulldown flags
5994  // Pulldown flags are only valid when 'broadcast' has been set.
5995  // So ticks_per_frame will be 2
5996  if (v->rff) {
5997  // repeat field
5999  } else if (v->rptfrm) {
6000  // repeat frames
6001  s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
6002  }
6003 
6006 
6007  if (avctx->hwaccel) {
6008  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
6009  goto err;
6010  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6011  goto err;
6012  if (avctx->hwaccel->end_frame(avctx) < 0)
6013  goto err;
6014  } else {
6015  int header_ret = 0;
6016 
6018 
6019  v->bits = buf_size * 8;
6020  v->end_mb_x = s->mb_width;
6021  if (v->field_mode) {
6022  s->current_picture.f->linesize[0] <<= 1;
6023  s->current_picture.f->linesize[1] <<= 1;
6024  s->current_picture.f->linesize[2] <<= 1;
6025  s->linesize <<= 1;
6026  s->uvlinesize <<= 1;
6027  }
6028  mb_height = s->mb_height >> v->field_mode;
6029 
6030  if (!mb_height) {
6031  av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
6032  goto err;
6033  }
6034 
6035  for (i = 0; i <= n_slices; i++) {
6036  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6037  if (v->field_mode <= 0) {
6038  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6039  "picture boundary (%d >= %d)\n", i,
6040  slices[i - 1].mby_start, mb_height);
6041  continue;
6042  }
6043  v->second_field = 1;
6044  v->blocks_off = s->mb_width * s->mb_height << 1;
6045  v->mb_off = s->mb_stride * s->mb_height >> 1;
6046  } else {
6047  v->second_field = 0;
6048  v->blocks_off = 0;
6049  v->mb_off = 0;
6050  }
6051  if (i) {
6052  v->pic_header_flag = 0;
6053  if (v->field_mode && i == n_slices1 + 2) {
6054  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6055  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6056  if (avctx->err_recognition & AV_EF_EXPLODE)
6057  goto err;
6058  continue;
6059  }
6060  } else if (get_bits1(&s->gb)) {
6061  v->pic_header_flag = 1;
6062  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6063  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6064  if (avctx->err_recognition & AV_EF_EXPLODE)
6065  goto err;
6066  continue;
6067  }
6068  }
6069  }
6070  if (header_ret < 0)
6071  continue;
6072  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6073  if (!v->field_mode || v->second_field)
6074  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6075  else
6076  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6077 
6078  if (s->end_mb_y <= s->start_mb_y) {
6079  av_log(v->s.avctx, AV_LOG_ERROR, "Invalid slice size\n");
6080  goto err;
6081  }
6082 
6084  if (i != n_slices)
6085  s->gb = slices[i].gb;
6086  }
6087  if (v->field_mode) {
6088  v->second_field = 0;
6089  s->current_picture.f->linesize[0] >>= 1;
6090  s->current_picture.f->linesize[1] >>= 1;
6091  s->current_picture.f->linesize[2] >>= 1;
6092  s->linesize >>= 1;
6093  s->uvlinesize >>= 1;
6095  FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6096  FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6097  }
6098  }
6099  av_dlog(s->avctx, "Consumed %i/%i bits\n",
6100  get_bits_count(&s->gb), s->gb.size_in_bits);
6101 // if (get_bits_count(&s->gb) > buf_size * 8)
6102 // return -1;
6103  if (!v->field_mode)
6104  ff_er_frame_end(&s->er);
6105  }
6106 
6107  ff_mpv_frame_end(s);
6108 
6109  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6110 image:
6111  avctx->width = avctx->coded_width = v->output_width;
6112  avctx->height = avctx->coded_height = v->output_height;
6113  if (avctx->skip_frame >= AVDISCARD_NONREF)
6114  goto end;
6115 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6116  if (vc1_decode_sprites(v, &s->gb))
6117  goto err;
6118 #endif
6119  if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
6120  goto err;
6121  *got_frame = 1;
6122  } else {
6123  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6124  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
6125  goto err;
6127  *got_frame = 1;
6128  } else if (s->last_picture_ptr) {
6129  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
6130  goto err;
6132  *got_frame = 1;
6133  }
6134  }
6135 
6136 end:
6137  av_free(buf2);
6138  for (i = 0; i < n_slices; i++)
6139  av_free(slices[i].buf);
6140  av_free(slices);
6141  return buf_size;
6142 
6143 err:
6144  av_free(buf2);
6145  for (i = 0; i < n_slices; i++)
6146  av_free(slices[i].buf);
6147  av_free(slices);
6148  return -1;
6149 }
6150 
6151 
/* Human-readable names for the four SMPTE VC-1 profiles, exposed through
 * AVCodec.profiles on each of the decoders below. The array ends with a
 * { FF_PROFILE_UNKNOWN } sentinel entry, which marks the list terminator. */
6152 static const AVProfile profiles[] = {
6153  { FF_PROFILE_VC1_SIMPLE, "Simple" },
6154  { FF_PROFILE_VC1_MAIN, "Main" },
6155  { FF_PROFILE_VC1_COMPLEX, "Complex" },
6156  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6157  { FF_PROFILE_UNKNOWN },
6158 };
6159 
6161 #if CONFIG_VC1_DXVA2_HWACCEL
6163 #endif
6164 #if CONFIG_VC1_VAAPI_HWACCEL
6166 #endif
6167 #if CONFIG_VC1_VDPAU_HWACCEL
6169 #endif
6172 };
6173 
6175  .name = "vc1",
6176  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6177  .type = AVMEDIA_TYPE_VIDEO,
6178  .id = AV_CODEC_ID_VC1,
6179  .priv_data_size = sizeof(VC1Context),
6180  .init = vc1_decode_init,
6183  .flush = ff_mpeg_flush,
6184  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6185  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6186  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6187 };
6188 
6189 #if CONFIG_WMV3_DECODER
6190 AVCodec ff_wmv3_decoder = {
6191  .name = "wmv3",
6192  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6193  .type = AVMEDIA_TYPE_VIDEO,
6194  .id = AV_CODEC_ID_WMV3,
6195  .priv_data_size = sizeof(VC1Context),
6196  .init = vc1_decode_init,
6199  .flush = ff_mpeg_flush,
6200  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6201  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6202  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6203 };
6204 #endif
6205 
6206 #if CONFIG_WMV3IMAGE_DECODER
6207 AVCodec ff_wmv3image_decoder = {
6208  .name = "wmv3image",
6209  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6210  .type = AVMEDIA_TYPE_VIDEO,
6211  .id = AV_CODEC_ID_WMV3IMAGE,
6212  .priv_data_size = sizeof(VC1Context),
6213  .init = vc1_decode_init,
6216  .capabilities = CODEC_CAP_DR1,
6217  .flush = vc1_sprite_flush,
6218  .pix_fmts = (const enum AVPixelFormat[]) {
6221  },
6222 };
6223 #endif
6224 
6225 #if CONFIG_VC1IMAGE_DECODER
6226 AVCodec ff_vc1image_decoder = {
6227  .name = "vc1image",
6228  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6229  .type = AVMEDIA_TYPE_VIDEO,
6230  .id = AV_CODEC_ID_VC1IMAGE,
6231  .priv_data_size = sizeof(VC1Context),
6232  .init = vc1_decode_init,
6235  .capabilities = CODEC_CAP_DR1,
6236  .flush = vc1_sprite_flush,
6237  .pix_fmts = (const enum AVPixelFormat[]) {
6240  },
6241 };
6242 #endif
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
Definition: vc1dec.c:265
in the bitstream is reported as 00b
Definition: vc1.h:173
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
Definition: vc1data.c:34
op_pixels_func avg_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:59
#define VC1_TTBLK_VLC_BITS
Definition: vc1data.c:126
IDCTDSPContext idsp
Definition: mpegvideo.h:354
void(* vc1_h_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:45
const struct AVCodec * codec
Definition: avcodec.h:1059
int topleft_blk_idx
Definition: vc1.h:393
#define MB_TYPE_SKIP
Definition: avcodec.h:786
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2637
discard all frames except keyframes
Definition: avcodec.h:567
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2368
#define VC1_IF_MBMODE_VLC_BITS
Definition: vc1data.c:145
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2972
BI type.
Definition: avutil.h:259
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int p_frame_skipped
Definition: vc1.h:388
Imode
Imode types.
Definition: vc1.c:54
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
Definition: vc1dec.c:3222
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
The VC1 Context.
Definition: vc1.h:182
int size
int esc3_level_length
Definition: mpegvideo.h:550
This structure describes decoded (raw) audio or video data.
Definition: frame.h:135
VLC ff_vc1_ttblk_vlc[3]
Definition: vc1data.c:127
#define VC1_ICBPCY_VLC_BITS
Definition: vc1data.c:120
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
Definition: vc1dec.c:3535
int k_x
Number of bits for MVs (depends on MV range)
Definition: vc1.h:243
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:37
int reffield
if numref = 0 (1 reference) then reffield decides which
Definition: vc1.h:366
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:279
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:314
uint8_t * mv_f_base
Definition: vc1.h:358
Definition: vf_drawbox.c:37
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:38
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:35
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1244
int mv_type_is_raw
mv type mb plane is not coded
Definition: vc1.h:297
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
Definition: vc1dec.c:1380
uint8_t dmvrange
Frame decoding info for interlaced picture.
Definition: vc1.h:343
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
#define ER_MB_END
#define AC_VLC_BITS
Definition: intrax8.c:37
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1349
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
Definition: vc1acdata.h:34
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:280
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1325
int16_t(*[3] ac_val)[16]
used for for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:320
static const int vc1_last_decode_table[AC_MODES]
Definition: vc1acdata.h:30
int tt_index
Index for Transform Type tables (to decode TTMB)
Definition: vc1.h:293
static void vc1_decode_p_blocks(VC1Context *v)
Definition: vc1dec.c:5049
static void vc1_put_signed_blocks_clamped(VC1Context *v)
Definition: vc1dec.c:91
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:258
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
Definition: vc1dec.c:1145
#define VC1_2REF_MVDATA_VLC_BITS
Definition: vc1data.c:140
void ff_er_frame_end(ERContext *s)
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V) ...
Definition: vc1dec.c:980
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:69
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:315
int next_use_ic
Definition: vc1.h:305
int size
Definition: avcodec.h:974
void(* clear_blocks)(int16_t *blocks)
Definition: blockdsp.h:36
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
Definition: vc1.h:310
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:127
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
Definition: msmpeg4dec.c:281
#define MB_TYPE_INTRA
Definition: mpegutils.h:69
void ff_print_debug_info(MpegEncContext *s, Picture *p)
Print debugging info for the given picture.
Definition: mpegvideo.c:1910
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1270
int frfd
Definition: vc1.h:375
void(* add_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: idctdsp.h:59
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
Definition: vc1.h:247
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
Definition: vc1dec.c:3012
static void vc1_decode_b_blocks(VC1Context *v)
Definition: vc1dec.c:5125
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
mpegvideo header.
int top_blk_idx
Definition: vc1.h:393
IntraX8Context x8
Definition: vc1.h:184
VLC * imv_vlc
Definition: vc1.h:349
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t * mb_type_base
Definition: vc1.h:272
discard all
Definition: avcodec.h:568
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
Definition: vc1.h:358
int sprite_height
Definition: vc1.h:384
uint8_t run
Definition: svq3.c:146
int last_use_ic
Definition: vc1.h:305
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:230
int end_mb_x
Horizontal macroblock limit (used only by mss2)
Definition: vc1.h:401
int profile
profile
Definition: avcodec.h:2638
QpelDSPContext qdsp
Definition: mpegvideo.h:359
AVCodec.
Definition: avcodec.h:2812
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:50
void ff_vc1_decode_blocks(VC1Context *v)
Definition: vc1dec.c:5210
int block_wrap[6]
Definition: mpegvideo.h:416
#define FFALIGN(x, a)
Definition: common.h:62
uint8_t rff
Definition: vc1.h:319
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
Definition: vc1dec.c:2327
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
Definition: vc1dec.c:2535
enum AVDiscard skip_frame
Definition: avcodec.h:2743
int bits
Definition: vc1.h:188
int range_x
Definition: vc1.h:245
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:122
static void vc1_apply_p_loop_filter(VC1Context *v)
Definition: vc1dec.c:3508
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
Definition: vc1data.c:1121
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2448
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2361
int esc3_run_length
Definition: mpegvideo.h:551
int refdist
distance of the current picture from reference
Definition: vc1.h:363
uint8_t * acpred_plane
AC prediction flags bitplane.
Definition: vc1.h:329
VC-1 tables.
int bi_type
Definition: vc1.h:389
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:275
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static const AVProfile profiles[]
Definition: vc1dec.c:6152
uint8_t bits
Definition: crc.c:251
uint8_t
static int vc1_decode_b_mb_intfr(VC1Context *v)
Decode one B-frame MB (in interlaced frame B picture)
Definition: vc1dec.c:4425
#define av_cold
Definition: attributes.h:66
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:57
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:48
int first_pic_header_flag
Definition: vc1.h:376
uint16_t * hrd_rate
Definition: vc1.h:334
av_cold int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
Definition: vc1.c:1568
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
Definition: vc1dsp.h:71
void(* vc1_inv_trans_8x8)(int16_t *b)
Definition: vc1dsp.h:36
#define DC_VLC_BITS
Definition: vc1dec.c:49
int left_blk_idx
Definition: vc1.h:393
#define AV_RB32
Definition: intreadwrite.h:130
int interlace
Progressive/interlaced (RPTFTM syntax element)
Definition: vc1.h:210
int y_ac_table_index
Luma index from AC2FRM element.
Definition: vc1.h:263
#define b
Definition: input.c:52
int second_field
Definition: vc1.h:362
#define ER_MB_ERROR
int n_allocated_blks
Definition: vc1.h:393
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:191
int c_ac_table_index
AC coding set indexes.
Definition: vc1.h:262
const int ff_vc1_ac_sizes[AC_MODES]
Definition: vc1data.c:1133
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:188
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:41
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1164
int ttfrm
Transform type info present at frame level.
Definition: vc1.h:265
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:306
int codingset2
index of current table set from 11.8 to use for chroma block decoding
Definition: vc1.h:269
int16_t bfraction
Relative position % anchors=> how to scale MVs.
Definition: vc1.h:280
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:684
int16_t((* luma_mv)[2]
Definition: vc1.h:396
quarterpel DSP functions
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
Definition: vc1.h:227
const char data[16]
Definition: mxf.c:70
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
Definition: vc1dec.c:197
MSMPEG4 data tables.
uint8_t * data
Definition: avcodec.h:973
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:194
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:51
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1252
uint8_t * forward_mb_plane
bitplane for "forward" MBs
Definition: vc1.h:296
uint8_t last_luty[2][256]
Definition: vc1.h:301
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:255
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
Definition: vc1.c:294
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
Definition: vc1dec.c:2097
#define B
Definition: huffyuv.h:49
int fieldtx_is_raw
Definition: vc1.h:355
uint8_t * over_flags_plane
Overflags bitplane.
Definition: vc1.h:331
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
Definition: vc1dec.c:4114
uint8_t fourmvbp
Definition: vc1.h:353
const int8_t ff_vc1_adv_interlaced_4x8_zz[32]
Definition: vc1data.c:1065
int range_y
MV range.
Definition: vc1.h:245
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:756
uint8_t last_lutuv[2][256]
lookup tables used for intensity compensation
Definition: vc1.h:301
uint8_t ttmbf
Transform type flag.
Definition: vc1.h:266
Definition: vc1.h:143
int k_y
Number of bits for MVs (depends on MV range)
Definition: vc1.h:244
#define transpose(x)
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:555
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:313
uint8_t twomvbp
Definition: vc1.h:352
int dmb_is_raw
direct mb plane is raw
Definition: vc1.h:298
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
Definition: vc1dec.c:2393
int16_t(* block)[6][64]
Definition: vc1.h:392
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:40
#define VC1_CBPCY_P_VLC_BITS
Definition: vc1data.c:118
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1355
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:186
int overlap
overlapped transforms in use
Definition: vc1.h:234
in the bitstream is reported as 11b
Definition: vc1.h:175
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:713
const int8_t ff_vc1_simple_progressive_4x4_zz[16]
Definition: vc1data.c:1022
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:39
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:69
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
Definition: vc1dec.c:345
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:145
ERContext er
Definition: mpegvideo.h:638
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
Definition: vc1dec.c:5614
#define GET_MQUANT()
Get macroblock-level quantizer scale.
Definition: vc1dec.c:1103
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:144
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:327
uint8_t * mv_f_next_base
Definition: vc1.h:359
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1144
VLC * mbmode_vlc
Definition: vc1.h:348
#define wrap(func)
Definition: neontest.h:62
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:169
const char * name
Name of the codec implementation.
Definition: avcodec.h:2819
#define IS_MARKER(state, i, buf, buf_size)
Definition: dca_parser.c:37
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:514
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:519
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
GetBitContext gb
Definition: mpegvideo.h:558
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1478
#define FFMAX(a, b)
Definition: common.h:55
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:52
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
Definition: vc1dec.c:1285
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra, int dir)
Predict and set motion vector for interlaced frame picture MBs.
Definition: vc1dec.c:1681
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
Definition: vc1.h:249
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:200
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:43
void(* put_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: idctdsp.h:53
int a_avail
Definition: vc1.h:271
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
Definition: vc1.h:357
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
Definition: vc1dec.c:2571
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2429
void(* put_signed_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: idctdsp.h:56
const int8_t ff_vc1_adv_interlaced_4x4_zz[16]
Definition: vc1data.c:1076
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:839
#define B_FRACTION_DEN
Definition: vc1data.h:99
VLC ff_vc1_ttmb_vlc[3]
Definition: vc1data.c:115
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:531
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
Definition: vc1dec.c:1401
int cur_field_type
0: top, 1: bottom
Definition: vc1.h:370
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
Definition: msmpeg4data.c:1825
VLC * twomvbp_vlc
Definition: vc1.h:350
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
Definition: vc1.h:250
AVCodec ff_vc1_decoder
Definition: vc1dec.c:6174
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2833
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
Definition: vc1dec.c:1322
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:196
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
Definition: vc1dec.c:5746
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2422
int x8_type
Definition: vc1.h:390
#define FFMIN(a, b)
Definition: common.h:57
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
Definition: vc1dec.c:5595
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:476
uint8_t * blk_mv_type_base
Definition: vc1.h:357
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
Definition: intrax8.c:695
int field_mode
1 for interlaced field pictures
Definition: vc1.h:360
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
Definition: intrax8.c:713
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2800
int width
picture width / height.
Definition: avcodec.h:1229
int8_t zzi_8x8[64]
Definition: vc1.h:356
#define VC1_SUBBLKPAT_VLC_BITS
Definition: vc1data.c:128
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:322
uint8_t mv_mode
Frame decoding info for all profiles.
Definition: vc1.h:241
#define FF_PROFILE_VC1_MAIN
Definition: avcodec.h:2684
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:107
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:310
int fourmvswitch
Definition: vc1.h:344
int mb_off
Definition: vc1.h:372
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2639
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:45
static void vc1_decode_skip_blocks(VC1Context *v)
Definition: vc1dec.c:5188
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3384
int size_in_bits
Definition: get_bits.h:56
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
Definition: vc1dec.c:5520
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:624
static const int offset_table[6]
Definition: vc1dec.c:3382
static int median4(int a, int b, int c, int d)
Definition: vc1dec.c:551
#define FFABS(a)
Definition: common.h:52
int level
level
Definition: avcodec.h:2721
static int vc1_decode_p_mb_intfr(VC1Context *v)
Definition: vc1dec.c:3779
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:522
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:209
MotionEstContext me
Definition: mpegvideo.h:404
#define AV_EF_EXPLODE
Definition: avcodec.h:2433
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
Definition: vc1dec.c:1183
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3448
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:28
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
Definition: vc1data.c:1097
#define FF_PROFILE_VC1_SIMPLE
Definition: avcodec.h:2683
uint32_t * cbp
Definition: vc1.h:394
int left_blk_sh
Definition: vc1.h:248
int16_t(* luma_mv_base)[2]
Definition: vc1.h:396
uint8_t * fieldtx_plane
Definition: vc1.h:354
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:415
int * ttblk_base
Definition: vc1.h:267
VLC * cbpcy_vlc
CBPCY VLC table.
Definition: vc1.h:292
static int decode210(GetBitContext *gb)
Definition: get_bits.h:547
if(ac->has_optimized_func)
static const float pred[4]
Definition: siprdata.h:259
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
Definition: vc1.h:385
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: utils.c:902
static const int8_t mv[256][2]
Definition: 4xm.c:75
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:63
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
Definition: vc1dec.c:170
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
Definition: vc1dec.c:1908
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:546
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1290
static const int offset_table1[9]
Definition: vc1dec.c:53
NULL
Definition: eval.c:55
static int width
Definition: utils.c:156
#define AV_LOG_INFO
Standard information.
Definition: log.h:134
int res_sprite
Simple/Main Profile sequence header.
Definition: vc1.h:192
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:49
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
Definition: vc1.h:248
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:260
enum AVCodecID codec_id
Definition: avcodec.h:1067
BlockDSPContext bdsp
Definition: mpegvideo.h:351
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:58
int c_avail
Definition: vc1.h:271
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:153
const int8_t ff_vc1_adv_interlaced_8x8_zz[64]
Definition: vc1data.c:1047
static const uint8_t vc1_delta_run_table[AC_MODES][57]
Definition: vc1acdata.h:295
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
Do motion compensation for 4-MV macroblock - luminance block.
Definition: vc1dec.c:564
uint32_t * cbp_base
Definition: vc1.h:394
main external API structure.
Definition: avcodec.h:1050
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:490
uint8_t * is_intra
Definition: vc1.h:395
static int vc1_decode_p_mb_intfi(VC1Context *v)
Definition: vc1dec.c:3992
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:318
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:223
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
Definition: vc1dec.c:4269
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:612
static void init_block_index(VC1Context *v)
Definition: vc1dec.c:78
int curr_use_ic
Definition: vc1.h:305
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:68
int extradata_size
Definition: avcodec.h:1165
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
Definition: vc1data.c:53
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:271
int sprite_width
Definition: vc1.h:384
int fmb_is_raw
forward mb plane is raw
Definition: vc1.h:299
uint8_t * is_intra_base
Definition: vc1.h:395
int coded_height
Definition: avcodec.h:1244
Definition: vc1.h:139
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:263
#define MB_INTRA_VLC_BITS
Definition: vc1dec.c:48
int index
Definition: gxfenc.c:72
struct AVFrame * f
Definition: mpegvideo.h:100
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:190
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
Definition: vc1dec.c:753
op_pixels_func put_no_rnd_pixels_tab[2][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:375
int context_initialized
Definition: mpegvideo.h:250
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:124
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value prediction dir: left=0, top=1.
Definition: vc1dec.c:2458
#define MB_TYPE_16x16
Definition: avcodec.h:778
#define mid_pred
Definition: mathops.h:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:261
int dim
int skip_is_raw
skip mb plane is not coded
Definition: vc1.h:300
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1677
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
Definition: intrax8.c:728
#define FF_PROFILE_VC1_COMPLEX
Definition: avcodec.h:2685
uint8_t next_lutuv[2][256]
lookup tables used for intensity compensation
Definition: vc1.h:303
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:304
int ref_field_type[2]
forward and backward reference field type (top or bottom)
Definition: vc1.h:371
uint8_t * direct_mb_plane
bitplane for "direct" MBs
Definition: vc1.h:295
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
Definition: vc1acdata.h:339
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:138
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:339
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
Definition: vc1dec.c:5781
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
Definition: vc1.h:294
Definition: vf_drawbox.c:37
int numref
number of past field pictures used as reference
Definition: vc1.h:364
const int32_t ff_vc1_dqscale[63]
Definition: vc1data.c:1085
int blocks_off
Definition: vc1.h:372
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:62
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:283
uint8_t tff
Definition: vc1.h:319
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:141
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:315
uint8_t level
Definition: svq3.c:147
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:192
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:398
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:257
MpegEncContext s
Definition: vc1.h:183
int height
Definition: gxfenc.c:72
in the bitstream is reported as 10b
Definition: vc1.h:174
MpegEncContext.
Definition: mpegvideo.h:204
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:309
int8_t * qscale_table
Definition: mpegvideo.h:104
struct AVCodecContext * avctx
Definition: mpegvideo.h:221
int cur_blk_idx
Definition: vc1.h:393
uint8_t pq
Definition: vc1.h:246
static const int offset_table2[9]
Definition: vc1dec.c:54
discard all non reference
Definition: avcodec.h:565
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
Definition: vc1dec.c:4771
int pqindex
raw pqindex used in coding set selection
Definition: vc1.h:270
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
Definition: vc1acdata.h:246
#define VC1_1REF_MVDATA_VLC_BITS
Definition: vc1data.c:138
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
Y , 8bpp.
Definition: pixfmt.h:73
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:256
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
Definition: vc1dec.c:2076
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:637
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:117
static enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[]
Definition: vc1dec.c:6160
#define VC1_TTMB_VLC_BITS
Definition: vc1data.c:114
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:33
uint8_t * dest[3]
Definition: mpegvideo.h:417
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1797
static const int size_table[6]
Definition: vc1dec.c:3381
int output_width
Definition: vc1.h:384
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Definition: vc1.h:316
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:288
uint8_t dquantfrm
pquant parameters
Definition: vc1.h:253
uint8_t next_luty[2][256]
Definition: vc1.h:303
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:308
Bi-dir predicted.
Definition: avutil.h:255
AVProfile.
Definition: avcodec.h:2800
int res_fasttx
reserved, always 1
Definition: vc1.h:196
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2729
int pic_header_flag
Definition: vc1.h:377
int * ttblk
Transform type at the block level.
Definition: vc1.h:267
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:499
VLC ff_vc1_ac_coeff_table[8]
Definition: vc1data.c:143
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
Definition: vc1dsp.h:46
uint8_t condover
Definition: vc1.h:333
void * priv_data
Definition: avcodec.h:1092
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Definition: vc1.c:526
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
Definition: vc1data.c:130
#define FF_PROFILE_VC1_ADVANCED
Definition: avcodec.h:2686
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
Definition: vc1.h:291
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:27
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
Definition: vc1dec.c:1427
int rnd
rounding control
Definition: vc1.h:306
VideoDSPContext vdsp
Definition: mpegvideo.h:360
Definition: vc1.h:142
AVFrame * sprite_output_frame
Definition: vc1.h:383
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1889
int acpred_is_raw
Definition: vc1.h:330
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
Definition: vc1dsp.h:67
const int8_t ff_vc1_adv_interlaced_8x4_zz[32]
Definition: vc1data.c:1058
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:600
op_pixels_func avg_no_rnd_pixels_tab[4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:92
uint8_t rptfrm
Definition: vc1.h:319
uint8_t(* curr_luty)[256]
Definition: vc1.h:304
static int decode012(GetBitContext *gb)
Definition: get_bits.h:537
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:66
int bmvtype
Definition: vc1.h:374
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:294
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:191
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
Definition: vc1dec.c:808
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
Definition: vc1dec.c:4915
H264ChromaContext h264chroma
Definition: vc1.h:185
int overflg_is_raw
Definition: vc1.h:332
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
Definition: vc1.h:424
Definition: vc1.h:136
int level
Advanced Profile.
Definition: vc1.h:206
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:238
int brfd
reference frame distance (forward or backward)
Definition: vc1.h:375
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegvideo.h:110
VLC ff_msmp4_mb_i_vlc
Definition: msmpeg4data.c:38
#define av_always_inline
Definition: attributes.h:40
uint8_t mv_mode2
Secondary MV coding mode (B frames)
Definition: vc1.h:242
int new_sprite
Frame decoding info for sprite modes.
Definition: vc1.h:381
uint8_t * mv_f_next[2]
Definition: vc1.h:359
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:735
#define FFSWAP(type, a, b)
Definition: common.h:60
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right)
Definition: vc1dsp.h:47
int two_sprites
Definition: vc1.h:382
int codingset
index of current table set from 11.8 to use for luma block decoding
Definition: vc1.h:268
uint8_t * mb_type[3]
Definition: vc1.h:272
uint16_t * hrd_buffer
Definition: vc1.h:334
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2985
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2996
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
Definition: vc1data.c:132
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:950
void(* vc1_v_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:44
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
VLC * fourmvbp_vlc
Definition: vc1.h:351
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int dc_table_index
Definition: mpegvideo.h:543
VLC ff_msmp4_dc_luma_vlc[2]
Definition: msmpeg4data.c:39
VLC ff_vc1_subblkpat_vlc[3]
Definition: vc1data.c:129
#define inc_blk_idx(idx)
uint8_t halfpq
Uniform quant over image and qp+.5.
Definition: vc1.h:281
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
Definition: vc1dec.c:2114
static const uint8_t vc1_delta_level_table[AC_MODES][31]
Definition: vc1acdata.h:203
VC1DSPContext vc1dsp
Definition: vc1.h:186
Predicted.
Definition: avutil.h:254
uint8_t((* curr_lutuv)[256]
Definition: vc1.h:304
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
Definition: vc1.h:410
int output_height
Definition: vc1.h:384
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:42
VLC ff_msmp4_dc_chroma_vlc[2]
Definition: msmpeg4data.c:40
op_pixels_func put_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:58
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
Definition: vc1dsp.h:66
HpelDSPContext hdsp
Definition: mpegvideo.h:353
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:53
static int16_t block[64]
Definition: dct-test.c:88