vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "dsputil.h"
31 #include "avcodec.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "vc1.h"
35 #include "vc1data.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
38 #include "unary.h"
39 #include "mathops.h"
40 #include "vdpau_internal.h"
41 
42 #undef NDEBUG
43 #include <assert.h>
44 
45 #define MB_INTRA_VLC_BITS 9
46 #define DC_VLC_BITS 9
47 
48 
49 // offset tables for interlaced picture MVDATA decoding
// Base offsets indexed by escape-code size: 0 followed by powers of two.
50 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
// Cumulative offsets: entry i is 2^i - 1.
51 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
52 
53 /***********************************************************************/
64 enum Imode {
72 }; //imode defines
74 
75  //Bitplane group
77 
/* NOTE(review): documentation-extraction artifact — this function's
 * signature line is missing and the names of several dsp calls were
 * dropped (only their argument continuation lines remain); the embedded
 * leading numbers are the extractor's line numbers.  Recover the original
 * vc1dec.c before changing any logic here; comments below are hedged. */
79 {
 80  MpegEncContext *s = &v->s;
 81  int topleft_mb_pos, top_mb_pos;
 82  int stride_y, fieldtx;
 83  int v_dist;
 84 
 85  /* The put pixels loop is always one MB row behind the decoding loop,
 86  * because we can only put pixels when overlap filtering is done, and
 87  * for filtering of the bottom edge of a MB, we need the next MB row
 88  * present as well.
 89  * Within the row, the put pixels loop is also one MB col behind the
 90  * decoding loop. The reason for this is again, because for filtering
 91  * of the right MB edge, we need the next MB present. */
 92  if (!s->first_slice_line) {
 /* Emit the macroblock above-left of the current one (one row and one
  * column behind the decoder, per the comment above). */
 93  if (s->mb_x) {
 94  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
 95  fieldtx = v->fieldtx_plane[topleft_mb_pos];
 /* luma stride is doubled when the MB uses field transform */
 96  stride_y = s->linesize << fieldtx;
 97  v_dist = (16 - fieldtx) >> (fieldtx == 0);
 /* four 8x8 luma destinations, then one 8x8 per chroma plane;
  * the call names preceding these argument lists were lost in
  * extraction — TODO recover from original source */
 99  s->dest[0] - 16 * s->linesize - 16,
 100  stride_y);
 102  s->dest[0] - 16 * s->linesize - 8,
 103  stride_y);
 105  s->dest[0] - v_dist * s->linesize - 16,
 106  stride_y);
 108  s->dest[0] - v_dist * s->linesize - 8,
 109  stride_y);
 111  s->dest[1] - 8 * s->uvlinesize - 8,
 112  s->uvlinesize);
 114  s->dest[2] - 8 * s->uvlinesize - 8,
 115  s->uvlinesize);
 116  }
 /* At the last MB of a row, also emit the MB directly above: no MB to
  * its right is still pending. */
 117  if (s->mb_x == s->mb_width - 1) {
 118  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
 119  fieldtx = v->fieldtx_plane[top_mb_pos];
 120  stride_y = s->linesize << fieldtx;
 121  v_dist = fieldtx ? 15 : 8;
 123  s->dest[0] - 16 * s->linesize,
 124  stride_y);
 126  s->dest[0] - 16 * s->linesize + 8,
 127  stride_y);
 129  s->dest[0] - v_dist * s->linesize,
 130  stride_y);
 132  s->dest[0] - v_dist * s->linesize + 8,
 133  stride_y);
 135  s->dest[1] - 8 * s->uvlinesize,
 136  s->uvlinesize);
 138  s->dest[2] - 8 * s->uvlinesize,
 139  s->uvlinesize);
 140  }
 141  }
 142 
 /* Advance a block-ring-buffer index, wrapping at v->n_allocated_blks. */
 143 #define inc_blk_idx(idx) do { \
 144  idx++; \
 145  if (idx >= v->n_allocated_blks) \
 146  idx = 0; \
 147  } while (0)
 148 
 153 }
154 
155 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
156 {
157  MpegEncContext *s = &v->s;
158  int j;
159  if (!s->first_slice_line) {
160  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
161  if (s->mb_x)
162  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
163  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
164  for (j = 0; j < 2; j++) {
165  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
166  if (s->mb_x)
167  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
168  }
169  }
170  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
171 
172  if (s->mb_y == s->end_mb_y - 1) {
173  if (s->mb_x) {
174  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
175  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
176  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
177  }
178  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
179  }
180 }
181 
/* NOTE(review): extraction artifact — the signature line of this delayed
 * loop-filter helper is missing; from the body it takes (VC1Context *v,
 * int pq) like its siblings — TODO confirm against original vc1dec.c.
 * Embedded leading numbers are the doc extractor's line numbers. */
183 {
 184  MpegEncContext *s = &v->s;
 185  int j;
 186 
 187  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
 188  * means it runs two rows/cols behind the decoding loop. */
 189  if (!s->first_slice_line) {
 190  if (s->mb_x) {
 /* Filter the MB two rows up / one column left once enough of its
  * neighbours have been decoded. */
 191  if (s->mb_y >= s->start_mb_y + 2) {
 192  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
 193 
 194  if (s->mb_x >= 2)
 195  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
 196  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
 197  for (j = 0; j < 2; j++) {
 198  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
 199  if (s->mb_x >= 2) {
 200  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
 201  }
 202  }
 203  }
 204  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
 205  }
 206 
 /* Right image edge: no MB ever arrives to the right, so catch up on
  * the MB directly above. */
 207  if (s->mb_x == s->mb_width - 1) {
 208  if (s->mb_y >= s->start_mb_y + 2) {
 209  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
 210 
 211  if (s->mb_x)
 212  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
 213  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
 214  for (j = 0; j < 2; j++) {
 215  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
 216  if (s->mb_x >= 2) {
 217  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
 218  }
 219  }
 220  }
 221  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
 222  }
 223 
 /* Bottom of the slice: no further rows arrive, so finish the last
  * decoded row's horizontal filtering. */
 224  if (s->mb_y == s->end_mb_y) {
 225  if (s->mb_x) {
 226  if (s->mb_x >= 2)
 227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
 228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
 229  if (s->mb_x >= 2) {
 230  for (j = 0; j < 2; j++) {
 231  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
 232  }
 233  }
 234  }
 235 
 236  if (s->mb_x == s->mb_width - 1) {
 237  if (s->mb_x)
 238  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
 239  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
 240  if (s->mb_x) {
 241  for (j = 0; j < 2; j++) {
 242  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
 243  }
 244  }
 245  }
 246  }
 247  }
 248 }
249 
/* NOTE(review): extraction artifact — the signature line is missing and
 * the overlap-filter call names before several `v->block[...]` argument
 * lines were dropped.  From the body this is the conditional overlap
 * smoothing pass over a VC1Context *v — TODO confirm against original
 * vc1dec.c before editing. */
251 {
 252  MpegEncContext *s = &v->s;
 253  int mb_pos;
 254 
 /* Overlap smoothing only runs when signalled in the bitstream. */
 255  if (v->condover == CONDOVER_NONE)
 256  return;
 257 
 258  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 259 
 260  /* Within a MB, the horizontal overlap always runs before the vertical.
 261  * To accomplish that, we run the H on left and internal borders of the
 262  * currently decoded MB. Then, we wait for the next overlap iteration
 263  * to do H overlap on the right edge of this MB, before moving over and
 264  * running the V overlap. Therefore, the V overlap makes us trail by one
 265  * MB col and the H overlap filter makes us trail by one MB row. This
 266  * is reflected in the time at which we run the put_pixels loop. */
 267  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
 268  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
 269  v->over_flags_plane[mb_pos - 1])) {
 /* left-edge overlap of current MB (filter call names lost) */
 271  v->block[v->cur_blk_idx][0]);
 273  v->block[v->cur_blk_idx][2]);
 274  if (!(s->flags & CODEC_FLAG_GRAY)) {
 276  v->block[v->cur_blk_idx][4]);
 278  v->block[v->cur_blk_idx][5]);
 279  }
 280  }
 /* internal vertical edges of current MB */
 282  v->block[v->cur_blk_idx][1]);
 284  v->block[v->cur_blk_idx][3]);
 285 
 286  if (s->mb_x == s->mb_width - 1) {
 287  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
 288  v->over_flags_plane[mb_pos - s->mb_stride])) {
 290  v->block[v->cur_blk_idx][0]);
 292  v->block[v->cur_blk_idx][1]);
 293  if (!(s->flags & CODEC_FLAG_GRAY)) {
 295  v->block[v->cur_blk_idx][4]);
 297  v->block[v->cur_blk_idx][5]);
 298  }
 299  }
 301  v->block[v->cur_blk_idx][2]);
 303  v->block[v->cur_blk_idx][3]);
 304  }
 305  }
 /* vertical overlap for the MB one column to the left */
 306  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
 307  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
 308  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
 310  v->block[v->left_blk_idx][0]);
 312  v->block[v->left_blk_idx][1]);
 313  if (!(s->flags & CODEC_FLAG_GRAY)) {
 315  v->block[v->left_blk_idx][4]);
 317  v->block[v->left_blk_idx][5]);
 318  }
 319  }
 321  v->block[v->left_blk_idx][2]);
 323  v->block[v->left_blk_idx][3]);
 324  }
 325 }
326 
/**
 * Motion compensation for one 16x16 macroblock with a single motion vector:
 * luma in half-/quarter-pel precision plus a chroma MV derived from it.
 *
 * @param v   VC-1 decoder context
 * @param dir prediction direction: 0 = forward (last picture),
 *            1 = backward (next picture)
 *
 * NOTE(review): extraction artifact — the embedded leading numbers are the
 * doc generator's line numbers, and two lines were lost: the head of the
 * edge-emulation `if` (before "436") and the head of the luma
 * emulated_edge_mc call (before "443").  Recover the original vc1dec.c
 * before changing logic in those regions.
 */
330 static void vc1_mc_1mv(VC1Context *v, int dir)
331 {
332  MpegEncContext *s = &v->s;
333  DSPContext *dsp = &v->s.dsp;
334  uint8_t *srcY, *srcU, *srcV;
335  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
336  int off, off_uv;
337  int v_edge_pos = s->v_edge_pos >> v->field_mode;
338 
 /* bail out if the needed reference picture is not available */
339  if ((!v->field_mode ||
340  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
341  !v->s.last_picture.f.data[0])
342  return;
343 
344  mx = s->mv[dir][0][0];
345  my = s->mv[dir][0][1];
346 
347  // store motion vectors for further use in B frames
348  if (s->pict_type == AV_PICTURE_TYPE_P) {
349  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
350  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
351  }
352 
 /* derive the chroma MV from the luma MV (round 3/4-pel up) */
353  uvmx = (mx + ((mx & 3) == 3)) >> 1;
354  uvmy = (my + ((my & 3) == 3)) >> 1;
355  v->luma_mv[s->mb_x][0] = uvmx;
356  v->luma_mv[s->mb_x][1] = uvmy;
357 
 /* opposite-parity reference field: bias the vertical components */
358  if (v->field_mode &&
359  v->cur_field_type != v->ref_field_type[dir]) {
360  my = my - 2 + 4 * v->cur_field_type;
361  uvmy = uvmy - 2 + 4 * v->cur_field_type;
362  }
363 
364  // fastuvmc shall be ignored for interlaced frame picture
365  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
366  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
367  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
368  }
 /* select the reference planes: same/opposite field in field mode,
  * otherwise last (forward) or next (backward) picture */
369  if (v->field_mode) { // interlaced field picture
370  if (!dir) {
371  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
372  srcY = s->current_picture.f.data[0];
373  srcU = s->current_picture.f.data[1];
374  srcV = s->current_picture.f.data[2];
375  } else {
376  srcY = s->last_picture.f.data[0];
377  srcU = s->last_picture.f.data[1];
378  srcV = s->last_picture.f.data[2];
379  }
380  } else {
381  srcY = s->next_picture.f.data[0];
382  srcU = s->next_picture.f.data[1];
383  srcV = s->next_picture.f.data[2];
384  }
385  } else {
386  if (!dir) {
387  srcY = s->last_picture.f.data[0];
388  srcU = s->last_picture.f.data[1];
389  srcV = s->last_picture.f.data[2];
390  } else {
391  srcY = s->next_picture.f.data[0];
392  srcU = s->next_picture.f.data[1];
393  srcV = s->next_picture.f.data[2];
394  }
395  }
396 
397  if (!srcY || !srcU) {
398  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
399  return;
400  }
401 
 /* integer source position = MB position + MV integer part */
402  src_x = s->mb_x * 16 + (mx >> 2);
403  src_y = s->mb_y * 16 + (my >> 2);
404  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
405  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
406 
407  if (v->profile != PROFILE_ADVANCED) {
408  src_x = av_clip( src_x, -16, s->mb_width * 16);
409  src_y = av_clip( src_y, -16, s->mb_height * 16);
410  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
411  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
412  } else {
413  src_x = av_clip( src_x, -17, s->avctx->coded_width);
414  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
415  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
416  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
417  }
418 
419  srcY += src_y * s->linesize + src_x;
420  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
421  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
422 
 /* bottom reference field starts one line into the frame buffer */
423  if (v->field_mode && v->ref_field_type[dir]) {
424  srcY += s->current_picture_ptr->f.linesize[0];
425  srcU += s->current_picture_ptr->f.linesize[1];
426  srcV += s->current_picture_ptr->f.linesize[2];
427  }
428 
429  /* for grayscale we should not try to read from unknown area */
430  if (s->flags & CODEC_FLAG_GRAY) {
431  srcU = s->edge_emu_buffer + 18 * s->linesize;
432  srcV = s->edge_emu_buffer + 18 * s->linesize;
433  }
434 
 /* NOTE(review): the head of this condition (line "435") was lost in
  * extraction; the surviving terms are the out-of-picture checks. */
436  || s->h_edge_pos < 22 || v_edge_pos < 22
437  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
438  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
439  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
440 
441  srcY -= s->mspel * (1 + s->linesize);
 /* NOTE(review): the emulated_edge_mc call head (line "442") was lost;
  * only its trailing arguments remain below. */
443  17 + s->mspel * 2, 17 + s->mspel * 2,
444  src_x - s->mspel, src_y - s->mspel,
445  s->h_edge_pos, v_edge_pos);
446  srcY = s->edge_emu_buffer;
447  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
448  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
449  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
450  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
451  srcU = uvbuf;
452  srcV = uvbuf + 16;
453  /* if we deal with range reduction we need to scale source blocks */
454  if (v->rangeredfrm) {
455  int i, j;
456  uint8_t *src, *src2;
457 
458  src = srcY;
459  for (j = 0; j < 17 + s->mspel * 2; j++) {
460  for (i = 0; i < 17 + s->mspel * 2; i++)
461  src[i] = ((src[i] - 128) >> 1) + 128;
462  src += s->linesize;
463  }
464  src = srcU;
465  src2 = srcV;
466  for (j = 0; j < 9; j++) {
467  for (i = 0; i < 9; i++) {
468  src[i] = ((src[i] - 128) >> 1) + 128;
469  src2[i] = ((src2[i] - 128) >> 1) + 128;
470  }
471  src += s->uvlinesize;
472  src2 += s->uvlinesize;
473  }
474  }
475  /* if we deal with intensity compensation we need to scale source blocks */
476  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
477  int i, j;
478  uint8_t *src, *src2;
479 
480  src = srcY;
481  for (j = 0; j < 17 + s->mspel * 2; j++) {
482  for (i = 0; i < 17 + s->mspel * 2; i++)
483  src[i] = v->luty[src[i]];
484  src += s->linesize;
485  }
486  src = srcU;
487  src2 = srcV;
488  for (j = 0; j < 9; j++) {
489  for (i = 0; i < 9; i++) {
490  src[i] = v->lutuv[src[i]];
491  src2[i] = v->lutuv[src2[i]];
492  }
493  src += s->uvlinesize;
494  src2 += s->uvlinesize;
495  }
496  }
497  srcY += s->mspel * (1 + s->linesize);
498  }
499 
 /* destination offset selects the current field in field mode */
500  if (v->field_mode && v->cur_field_type) {
501  off = s->current_picture_ptr->f.linesize[0];
502  off_uv = s->current_picture_ptr->f.linesize[1];
503  } else {
504  off = 0;
505  off_uv = 0;
506  }
 /* luma: quarter-pel (mspel) as four 8x8 calls, otherwise half-pel 16x16 */
507  if (s->mspel) {
508  dxy = ((my & 3) << 2) | (mx & 3);
509  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
510  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
511  srcY += s->linesize * 8;
512  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
513  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
514  } else { // hpel mc - always used for luma
515  dxy = (my & 2) | ((mx & 2) >> 1);
516  if (!v->rnd)
517  dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
518  else
519  dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
520  }
521 
522  if (s->flags & CODEC_FLAG_GRAY) return;
523  /* Chroma MC always uses qpel bilinear */
524  uvmx = (uvmx & 3) << 1;
525  uvmy = (uvmy & 3) << 1;
526  if (!v->rnd) {
527  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
528  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
529  } else {
530  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
531  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
532  }
533 }
534 
/**
 * Return the median of four integers: the average of the two middle
 * values, truncated toward zero (C integer division semantics).
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo1, hi1, lo2, hi2;

    /* order each input pair */
    if (a < b) {
        lo1 = a; hi1 = b;
    } else {
        lo1 = b; hi1 = a;
    }
    if (c < d) {
        lo2 = c; hi2 = d;
    } else {
        lo2 = d; hi2 = c;
    }
    /* the two middle values are the larger low and the smaller high */
    return ((lo1 > lo2 ? lo1 : lo2) + (hi1 < hi2 ? hi1 : hi2)) / 2;
}
545 
/**
 * Motion compensation for one 8x8 luma block (block n of 4) in 4MV mode,
 * direction dir (0 = forward, 1 = backward).
 *
 * NOTE(review): extraction artifact — the embedded leading numbers are the
 * doc generator's line numbers, and two lines were lost: the head of the
 * edge-emulation `if` (before "675") and the head of the emulated_edge_mc
 * call (before "681").  Recover the original vc1dec.c before changing
 * logic in those regions.
 */
548 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
549 {
550  MpegEncContext *s = &v->s;
551  DSPContext *dsp = &v->s.dsp;
552  uint8_t *srcY;
553  int dxy, mx, my, src_x, src_y;
554  int off;
 /* field MV only exists for interlaced frame pictures */
555  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
556  int v_edge_pos = s->v_edge_pos >> v->field_mode;
557 
558  if ((!v->field_mode ||
559  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
560  !v->s.last_picture.f.data[0])
561  return;
562 
563  mx = s->mv[dir][n][0];
564  my = s->mv[dir][n][1];
565 
 /* select the reference luma plane */
566  if (!dir) {
567  if (v->field_mode) {
568  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
569  srcY = s->current_picture.f.data[0];
570  else
571  srcY = s->last_picture.f.data[0];
572  } else
573  srcY = s->last_picture.f.data[0];
574  } else
575  srcY = s->next_picture.f.data[0];
576 
577  if (!srcY) {
578  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
579  return;
580  }
581 
582  if (v->field_mode) {
583  if (v->cur_field_type != v->ref_field_type[dir])
584  my = my - 2 + 4 * v->cur_field_type;
585  }
586 
 /* on the last block of a field P-MB, derive the MV stored for B-frame
  * prediction from the dominant field's block MVs */
587  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
588  int same_count = 0, opp_count = 0, k;
589  int chosen_mv[2][4][2], f;
590  int tx, ty;
591  for (k = 0; k < 4; k++) {
592  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
593  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
594  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
595  opp_count += f;
596  same_count += 1 - f;
597  }
598  f = opp_count > same_count;
599  switch (f ? opp_count : same_count) {
600  case 4:
601  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
602  chosen_mv[f][2][0], chosen_mv[f][3][0]);
603  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
604  chosen_mv[f][2][1], chosen_mv[f][3][1]);
605  break;
606  case 3:
607  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
608  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
609  break;
610  case 2:
611  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
612  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
613  break;
614  }
615  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
616  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
617  for (k = 0; k < 4; k++)
618  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
619  }
620 
 /* MV pull-back so the reference block stays near the picture */
621  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
622  int qx, qy;
623  int width = s->avctx->coded_width;
624  int height = s->avctx->coded_height >> 1;
625  qx = (s->mb_x * 16) + (mx >> 2);
626  qy = (s->mb_y * 8) + (my >> 3);
627 
628  if (qx < -17)
629  mx -= 4 * (qx + 17);
630  else if (qx > width)
631  mx -= 4 * (qx - width);
632  if (qy < -18)
633  my -= 8 * (qy + 18);
634  else if (qy > height + 1)
635  my -= 8 * (qy - height - 1);
636  }
637 
 /* destination offset of this 8x8 block inside the MB */
638  if ((v->fcm == ILACE_FRAME) && fieldmv)
639  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
640  else
641  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
642  if (v->field_mode && v->cur_field_type)
643  off += s->current_picture_ptr->f.linesize[0];
644 
645  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
646  if (!fieldmv)
647  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
648  else
649  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
650 
651  if (v->profile != PROFILE_ADVANCED) {
652  src_x = av_clip(src_x, -16, s->mb_width * 16);
653  src_y = av_clip(src_y, -16, s->mb_height * 16);
654  } else {
655  src_x = av_clip(src_x, -17, s->avctx->coded_width);
656  if (v->fcm == ILACE_FRAME) {
657  if (src_y & 1)
658  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
659  else
660  src_y = av_clip(src_y, -18, s->avctx->coded_height);
661  } else {
662  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
663  }
664  }
665 
666  srcY += src_y * s->linesize + src_x;
667  if (v->field_mode && v->ref_field_type[dir])
668  srcY += s->current_picture_ptr->f.linesize[0];
669 
670  if (fieldmv && !(src_y & 1))
671  v_edge_pos--;
672  if (fieldmv && (src_y & 1) && src_y < 4)
673  src_y--;
 /* NOTE(review): the head of this condition (line "674") was lost in
  * extraction; the surviving terms are the out-of-picture checks. */
675  || s->h_edge_pos < 13 || v_edge_pos < 23
676  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
677  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
678  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
679  /* check emulate edge stride and offset */
 /* NOTE(review): the emulated_edge_mc call head (line "680") was lost;
  * only its trailing arguments remain below. */
681  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
682  src_x - s->mspel, src_y - (s->mspel << fieldmv),
683  s->h_edge_pos, v_edge_pos);
684  srcY = s->edge_emu_buffer;
685  /* if we deal with range reduction we need to scale source blocks */
686  if (v->rangeredfrm) {
687  int i, j;
688  uint8_t *src;
689 
690  src = srcY;
691  for (j = 0; j < 9 + s->mspel * 2; j++) {
692  for (i = 0; i < 9 + s->mspel * 2; i++)
693  src[i] = ((src[i] - 128) >> 1) + 128;
694  src += s->linesize << fieldmv;
695  }
696  }
697  /* if we deal with intensity compensation we need to scale source blocks */
698  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
699  int i, j;
700  uint8_t *src;
701 
702  src = srcY;
703  for (j = 0; j < 9 + s->mspel * 2; j++) {
704  for (i = 0; i < 9 + s->mspel * 2; i++)
705  src[i] = v->luty[src[i]];
706  src += s->linesize << fieldmv;
707  }
708  }
709  srcY += s->mspel * (1 + (s->linesize << fieldmv));
710  }
711 
712  if (s->mspel) {
713  dxy = ((my & 3) << 2) | (mx & 3);
714  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
715  } else { // hpel mc - always used for luma
716  dxy = (my & 2) | ((mx & 2) >> 1);
717  if (!v->rnd)
718  dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
719  else
720  dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
721  }
722 }
723 
724 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
725 {
726  int idx, i;
727  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
728 
729  idx = ((a[3] != flag) << 3)
730  | ((a[2] != flag) << 2)
731  | ((a[1] != flag) << 1)
732  | (a[0] != flag);
733  if (!idx) {
734  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
735  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
736  return 4;
737  } else if (count[idx] == 1) {
738  switch (idx) {
739  case 0x1:
740  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
741  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
742  return 3;
743  case 0x2:
744  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
745  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
746  return 3;
747  case 0x4:
748  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
749  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
750  return 3;
751  case 0x8:
752  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
753  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
754  return 3;
755  }
756  } else if (count[idx] == 2) {
757  int t1 = 0, t2 = 0;
758  for (i = 0; i < 3; i++)
759  if (!a[i]) {
760  t1 = i;
761  break;
762  }
763  for (i = t1 + 1; i < 4; i++)
764  if (!a[i]) {
765  t2 = i;
766  break;
767  }
768  *tx = (mvx[t1] + mvx[t2]) / 2;
769  *ty = (mvy[t1] + mvy[t2]) / 2;
770  return 2;
771  } else {
772  return 0;
773  }
774  return -1;
775 }
776 
/**
 * Motion compensation for the chroma planes in 4MV mode: derives one
 * chroma MV from the four luma MVs and does one 8x8 bilinear MC per plane.
 *
 * @param v   VC-1 decoder context
 * @param dir prediction direction: 0 = forward, 1 = backward
 *
 * NOTE(review): extraction artifact — the embedded leading numbers are the
 * doc generator's line numbers, and two lines were lost: the head of the
 * edge-emulation `if` (before "883") and the head of the first
 * emulated_edge_mc call (before "887").  Recover the original vc1dec.c
 * before changing logic in those regions.
 */
779 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
780 {
781  MpegEncContext *s = &v->s;
782  DSPContext *dsp = &v->s.dsp;
783  uint8_t *srcU, *srcV;
784  int uvmx, uvmy, uvsrc_x, uvsrc_y;
785  int k, tx = 0, ty = 0;
786  int mvx[4], mvy[4], intra[4], mv_f[4];
787  int valid_count;
788  int chroma_ref_type = v->cur_field_type, off = 0;
789  int v_edge_pos = s->v_edge_pos >> v->field_mode;
790 
791  if (!v->field_mode && !v->s.last_picture.f.data[0])
792  return;
793  if (s->flags & CODEC_FLAG_GRAY)
794  return;
795 
 /* gather the four luma MVs and their intra/field flags */
796  for (k = 0; k < 4; k++) {
797  mvx[k] = s->mv[dir][k][0];
798  mvy[k] = s->mv[dir][k][1];
799  intra[k] = v->mb_type[0][s->block_index[k]];
800  if (v->field_mode)
801  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
802  }
803 
804  /* calculate chroma MV vector from four luma MVs */
805  if (!v->field_mode || (v->field_mode && !v->numref)) {
806  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
807  chroma_ref_type = v->reffield;
808  if (!valid_count) {
809  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
810  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
811  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
812  return; //no need to do MC for intra blocks
813  }
814  } else {
 /* two-reference field mode: use the dominant field's MVs */
815  int dominant = 0;
816  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
817  dominant = 1;
818  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
819  if (dominant)
820  chroma_ref_type = !v->cur_field_type;
821  }
822  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
823  return;
824  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
825  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
 /* round the derived luma-precision MV to chroma precision */
826  uvmx = (tx + ((tx & 3) == 3)) >> 1;
827  uvmy = (ty + ((ty & 3) == 3)) >> 1;
828 
829  v->luma_mv[s->mb_x][0] = uvmx;
830  v->luma_mv[s->mb_x][1] = uvmy;
831 
832  if (v->fastuvmc) {
833  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
834  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
835  }
836  // Field conversion bias
837  if (v->cur_field_type != chroma_ref_type)
838  uvmy += 2 - 4 * chroma_ref_type;
839 
840  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
841  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
842 
843  if (v->profile != PROFILE_ADVANCED) {
844  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
845  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
846  } else {
847  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
848  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
849  }
850 
 /* select the reference chroma planes */
851  if (!dir) {
852  if (v->field_mode) {
853  if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
854  srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
855  srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
856  } else {
857  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
858  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
859  }
860  } else {
861  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
862  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
863  }
864  } else {
865  srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
866  srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
867  }
868 
869  if (!srcU) {
870  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
871  return;
872  }
873 
874  if (v->field_mode) {
875  if (chroma_ref_type) {
876  srcU += s->current_picture_ptr->f.linesize[1];
877  srcV += s->current_picture_ptr->f.linesize[2];
878  }
879  off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
880  }
881 
 /* NOTE(review): the head of this condition (line "882") was lost in
  * extraction; the surviving terms are the out-of-picture checks. */
883  || s->h_edge_pos < 18 || v_edge_pos < 18
884  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
885  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
 /* NOTE(review): the emulated_edge_mc call head (line "886") was lost;
  * only its trailing arguments remain below. */
887  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
888  s->h_edge_pos >> 1, v_edge_pos >> 1);
889  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
890  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
891  s->h_edge_pos >> 1, v_edge_pos >> 1);
892  srcU = s->edge_emu_buffer;
893  srcV = s->edge_emu_buffer + 16;
894 
895  /* if we deal with range reduction we need to scale source blocks */
896  if (v->rangeredfrm) {
897  int i, j;
898  uint8_t *src, *src2;
899 
900  src = srcU;
901  src2 = srcV;
902  for (j = 0; j < 9; j++) {
903  for (i = 0; i < 9; i++) {
904  src[i] = ((src[i] - 128) >> 1) + 128;
905  src2[i] = ((src2[i] - 128) >> 1) + 128;
906  }
907  src += s->uvlinesize;
908  src2 += s->uvlinesize;
909  }
910  }
911  /* if we deal with intensity compensation we need to scale source blocks */
912  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
913  int i, j;
914  uint8_t *src, *src2;
915 
916  src = srcU;
917  src2 = srcV;
918  for (j = 0; j < 9; j++) {
919  for (i = 0; i < 9; i++) {
920  src[i] = v->lutuv[src[i]];
921  src2[i] = v->lutuv[src2[i]];
922  }
923  src += s->uvlinesize;
924  src2 += s->uvlinesize;
925  }
926  }
927  }
928 
929  /* Chroma MC always uses qpel bilinear */
930  uvmx = (uvmx & 3) << 1;
931  uvmy = (uvmy & 3) << 1;
932  if (!v->rnd) {
933  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
934  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
935  } else {
936  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
937  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
938  }
939 }
940 
/* NOTE(review): extraction artifact — the signature line is missing; from
 * the body this is the interlaced-frame chroma MC that does four 4x4
 * bilinear MCs, one per luma block MV, over a VC1Context *v.  One
 * emulated_edge_mc call head (line "991") was also lost.  Recover the
 * original vc1dec.c before editing. */
944 {
945  MpegEncContext *s = &v->s;
946  DSPContext *dsp = &v->s.dsp;
947  uint8_t *srcU, *srcV;
948  int uvsrc_x, uvsrc_y;
949  int uvmx_field[4], uvmy_field[4];
950  int i, off, tx, ty;
951  int fieldmv = v->blk_mv_type[s->block_index[0]];
 /* rounding table for field-MV vertical chroma derivation */
952  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
953  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
954  int v_edge_pos = s->v_edge_pos >> 1;
955 
956  if (!v->s.last_picture.f.data[0])
957  return;
958  if (s->flags & CODEC_FLAG_GRAY)
959  return;
960 
 /* derive one chroma MV per luma block */
961  for (i = 0; i < 4; i++) {
962  tx = s->mv[0][i][0];
963  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
964  ty = s->mv[0][i][1];
965  if (fieldmv)
966  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
967  else
968  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
969  }
970 
971  for (i = 0; i < 4; i++) {
972  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
973  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
974  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
975  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
976  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
977  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
978  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
979  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
980  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
981  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
982 
983  if (fieldmv && !(uvsrc_y & 1))
984  v_edge_pos--;
985  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
986  uvsrc_y--;
987  if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
988  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
989  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
990  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
 /* NOTE(review): the emulated_edge_mc call head (line "991") was lost;
  * only its trailing arguments remain below. */
 992  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
 993  s->h_edge_pos >> 1, v_edge_pos);
 994  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
 995  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
 996  s->h_edge_pos >> 1, v_edge_pos);
 997  srcU = s->edge_emu_buffer;
 998  srcV = s->edge_emu_buffer + 16;
 999 
 1000  /* if we deal with intensity compensation we need to scale source blocks */
 1001  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
 1002  int i, j;
 1003  uint8_t *src, *src2;
 1004 
 1005  src = srcU;
 1006  src2 = srcV;
 1007  for (j = 0; j < 5; j++) {
 1008  for (i = 0; i < 5; i++) {
 1009  src[i] = v->lutuv[src[i]];
 1010  src2[i] = v->lutuv[src2[i]];
 1011  }
 1012  src += s->uvlinesize << 1;
 1013  src2 += s->uvlinesize << 1;
 1014  }
 1015  }
 1016  }
 1017  if (!v->rnd) {
 1018  dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
 1019  dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
 1020  } else {
 1021  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
 1022  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
 1023  }
 1024  }
 1025 }
1026 
1027 /***********************************************************************/
/**
 * Compute the macroblock-level quantizer scale (MQUANT).
 * Expands in a caller that provides the locals 'mquant' and 'mqdiff' and the
 * variables 'v' (VC1Context*), 's' (MpegEncContext*) and 'gb' (GetBitContext*).
 * Active only when per-MB DQUANT is signalled (v->dquantfrm):
 *  - DQPROFILE_ALL_MBS: every MB carries its own quantizer, either a one-bit
 *    pq/altpq switch (dqbilevel) or a 3-bit delta with a 5-bit escape.
 *  - edge profiles: MBs on the selected picture edge(s) use v->altpq; the
 *    'edges' bitmask encodes left/top/right/bottom as bits 0..3.
 * An out-of-range result (0 or >31) is reported and forced to 1.
 */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
1072 
/**
 * Decode one MVDATA entry (MV differentials) for a progressive P macroblock.
 * Expands in a caller providing the locals 'index', 'index1', 'val', 'sign',
 * 'mb_has_coeffs', the tables 'size_table'/'offset_table', plus 'v', 's', 'gb'.
 * Side effects on caller state:
 *  - mb_has_coeffs: set when the VLC index signals coded coefficients
 *  - s->mb_intra:   set when index == 36 (intra escape, zero differential)
 * index == 35 is the explicit escape: raw (k_x-1 / k_y-1, +quarter_sample)
 * bit fields are read for each component. Otherwise the index is split into
 * per-component table entries and the sign is folded in branch-free via XOR.
 */
#define GET_MVDATA(_dmv_x, _dmv_y)                                      \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                      \
    if (index > 36) {                                                   \
        mb_has_coeffs = 1;                                              \
        index -= 37;                                                    \
    } else                                                              \
        mb_has_coeffs = 0;                                              \
    s->mb_intra = 0;                                                    \
    if (!index) {                                                       \
        _dmv_x = _dmv_y = 0;                                            \
    } else if (index == 35) {                                           \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
    } else if (index == 36) {                                           \
        _dmv_x = 0;                                                     \
        _dmv_y = 0;                                                     \
        s->mb_intra = 1;                                                \
    } else {                                                            \
        /* horizontal component */                                      \
        index1 = index % 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else                                   val = 0;                 \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else                                   val = 0;                 \
        sign = 0 - (val & 1);                                           \
        _dmv_x = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
                                                                        \
        /* vertical component */                                        \
        index1 = index / 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else                                   val = 0;                 \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else                                   val = 0;                 \
        sign = 0 - (val & 1);                                           \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
    }
1117 
1119  int *dmv_y, int *pred_flag)
1120 {
1121  int index, index1;
1122  int extend_x = 0, extend_y = 0;
1123  GetBitContext *gb = &v->s.gb;
1124  int bits, esc;
1125  int val, sign;
1126  const int* offs_tab;
1127 
1128  if (v->numref) {
1129  bits = VC1_2REF_MVDATA_VLC_BITS;
1130  esc = 125;
1131  } else {
1132  bits = VC1_1REF_MVDATA_VLC_BITS;
1133  esc = 71;
1134  }
1135  switch (v->dmvrange) {
1136  case 1:
1137  extend_x = 1;
1138  break;
1139  case 2:
1140  extend_y = 1;
1141  break;
1142  case 3:
1143  extend_x = extend_y = 1;
1144  break;
1145  }
1146  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1147  if (index == esc) {
1148  *dmv_x = get_bits(gb, v->k_x);
1149  *dmv_y = get_bits(gb, v->k_y);
1150  if (v->numref) {
1151  if (pred_flag) {
1152  *pred_flag = *dmv_y & 1;
1153  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1154  } else {
1155  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1156  }
1157  }
1158  }
1159  else {
1160  if (extend_x)
1161  offs_tab = offset_table2;
1162  else
1163  offs_tab = offset_table1;
1164  index1 = (index + 1) % 9;
1165  if (index1 != 0) {
1166  val = get_bits(gb, index1 + extend_x);
1167  sign = 0 -(val & 1);
1168  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1169  } else
1170  *dmv_x = 0;
1171  if (extend_y)
1172  offs_tab = offset_table2;
1173  else
1174  offs_tab = offset_table1;
1175  index1 = (index + 1) / 9;
1176  if (index1 > v->numref) {
1177  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1178  sign = 0 - (val & 1);
1179  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1180  } else
1181  *dmv_y = 0;
1182  if (v->numref && pred_flag)
1183  *pred_flag = index1 & 1;
1184  }
1185 }
1186 
1187 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1188 {
1189  int scaledvalue, refdist;
1190  int scalesame1, scalesame2;
1191  int scalezone1_x, zone1offset_x;
1192  int table_index = dir ^ v->second_field;
1193 
1194  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1195  refdist = v->refdist;
1196  else
1197  refdist = dir ? v->brfd : v->frfd;
1198  if (refdist > 3)
1199  refdist = 3;
1200  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1201  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1202  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1203  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1204 
1205  if (FFABS(n) > 255)
1206  scaledvalue = n;
1207  else {
1208  if (FFABS(n) < scalezone1_x)
1209  scaledvalue = (n * scalesame1) >> 8;
1210  else {
1211  if (n < 0)
1212  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1213  else
1214  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1215  }
1216  }
1217  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1218 }
1219 
1220 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1221 {
1222  int scaledvalue, refdist;
1223  int scalesame1, scalesame2;
1224  int scalezone1_y, zone1offset_y;
1225  int table_index = dir ^ v->second_field;
1226 
1227  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1228  refdist = v->refdist;
1229  else
1230  refdist = dir ? v->brfd : v->frfd;
1231  if (refdist > 3)
1232  refdist = 3;
1233  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1234  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1235  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1236  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1237 
1238  if (FFABS(n) > 63)
1239  scaledvalue = n;
1240  else {
1241  if (FFABS(n) < scalezone1_y)
1242  scaledvalue = (n * scalesame1) >> 8;
1243  else {
1244  if (n < 0)
1245  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1246  else
1247  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1248  }
1249  }
1250 
1251  if (v->cur_field_type && !v->ref_field_type[dir])
1252  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1253  else
1254  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1255 }
1256 
1257 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1258 {
1259  int scalezone1_x, zone1offset_x;
1260  int scaleopp1, scaleopp2, brfd;
1261  int scaledvalue;
1262 
1263  brfd = FFMIN(v->brfd, 3);
1264  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1265  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1266  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1267  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1268 
1269  if (FFABS(n) > 255)
1270  scaledvalue = n;
1271  else {
1272  if (FFABS(n) < scalezone1_x)
1273  scaledvalue = (n * scaleopp1) >> 8;
1274  else {
1275  if (n < 0)
1276  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1277  else
1278  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1279  }
1280  }
1281  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1282 }
1283 
1284 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1285 {
1286  int scalezone1_y, zone1offset_y;
1287  int scaleopp1, scaleopp2, brfd;
1288  int scaledvalue;
1289 
1290  brfd = FFMIN(v->brfd, 3);
1291  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1292  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1293  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1294  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1295 
1296  if (FFABS(n) > 63)
1297  scaledvalue = n;
1298  else {
1299  if (FFABS(n) < scalezone1_y)
1300  scaledvalue = (n * scaleopp1) >> 8;
1301  else {
1302  if (n < 0)
1303  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1304  else
1305  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1306  }
1307  }
1308  if (v->cur_field_type && !v->ref_field_type[dir]) {
1309  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1310  } else {
1311  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1312  }
1313 }
1314 
1315 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1316  int dim, int dir)
1317 {
1318  int brfd, scalesame;
1319  int hpel = 1 - v->s.quarter_sample;
1320 
1321  n >>= hpel;
1322  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1323  if (dim)
1324  n = scaleforsame_y(v, i, n, dir) << hpel;
1325  else
1326  n = scaleforsame_x(v, n, dir) << hpel;
1327  return n;
1328  }
1329  brfd = FFMIN(v->brfd, 3);
1330  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1331 
1332  n = (n * scalesame >> 8) << hpel;
1333  return n;
1334 }
1335 
1336 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1337  int dim, int dir)
1338 {
1339  int refdist, scaleopp;
1340  int hpel = 1 - v->s.quarter_sample;
1341 
1342  n >>= hpel;
1343  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1344  if (dim)
1345  n = scaleforopp_y(v, n, dir) << hpel;
1346  else
1347  n = scaleforopp_x(v, n) << hpel;
1348  return n;
1349  }
1350  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1351  refdist = FFMIN(v->refdist, 3);
1352  else
1353  refdist = dir ? v->brfd : v->frfd;
1354  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1355 
1356  n = (n * scaleopp >> 8) << hpel;
1357  return n;
1358 }
1359 
1362 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1363  int mv1, int r_x, int r_y, uint8_t* is_intra,
1364  int pred_flag, int dir)
1365 {
1366  MpegEncContext *s = &v->s;
1367  int xy, wrap, off = 0;
1368  int16_t *A, *B, *C;
1369  int px, py;
1370  int sum;
1371  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1372  int opposite, a_f, b_f, c_f;
1373  int16_t field_predA[2];
1374  int16_t field_predB[2];
1375  int16_t field_predC[2];
1376  int a_valid, b_valid, c_valid;
1377  int hybridmv_thresh, y_bias = 0;
1378 
1379  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1381  mixedmv_pic = 1;
1382  else
1383  mixedmv_pic = 0;
1384  /* scale MV difference to be quad-pel */
1385  dmv_x <<= 1 - s->quarter_sample;
1386  dmv_y <<= 1 - s->quarter_sample;
1387 
1388  wrap = s->b8_stride;
1389  xy = s->block_index[n];
1390 
1391  if (s->mb_intra) {
1392  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1393  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1394  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1395  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1396  if (mv1) { /* duplicate motion data for 1-MV block */
1397  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1398  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1399  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1400  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1401  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1402  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1403  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1404  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1405  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1406  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1407  s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1408  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1409  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1410  }
1411  return;
1412  }
1413 
1414  C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1415  A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1416  if (mv1) {
1417  if (v->field_mode && mixedmv_pic)
1418  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1419  else
1420  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1421  } else {
1422  //in 4-MV mode different blocks have different B predictor position
1423  switch (n) {
1424  case 0:
1425  off = (s->mb_x > 0) ? -1 : 1;
1426  break;
1427  case 1:
1428  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1429  break;
1430  case 2:
1431  off = 1;
1432  break;
1433  case 3:
1434  off = -1;
1435  }
1436  }
1437  B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
1438 
1439  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1440  b_valid = a_valid && (s->mb_width > 1);
1441  c_valid = s->mb_x || (n == 1 || n == 3);
1442  if (v->field_mode) {
1443  a_valid = a_valid && !is_intra[xy - wrap];
1444  b_valid = b_valid && !is_intra[xy - wrap + off];
1445  c_valid = c_valid && !is_intra[xy - 1];
1446  }
1447 
1448  if (a_valid) {
1449  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1450  num_oppfield += a_f;
1451  num_samefield += 1 - a_f;
1452  field_predA[0] = A[0];
1453  field_predA[1] = A[1];
1454  } else {
1455  field_predA[0] = field_predA[1] = 0;
1456  a_f = 0;
1457  }
1458  if (b_valid) {
1459  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1460  num_oppfield += b_f;
1461  num_samefield += 1 - b_f;
1462  field_predB[0] = B[0];
1463  field_predB[1] = B[1];
1464  } else {
1465  field_predB[0] = field_predB[1] = 0;
1466  b_f = 0;
1467  }
1468  if (c_valid) {
1469  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1470  num_oppfield += c_f;
1471  num_samefield += 1 - c_f;
1472  field_predC[0] = C[0];
1473  field_predC[1] = C[1];
1474  } else {
1475  field_predC[0] = field_predC[1] = 0;
1476  c_f = 0;
1477  }
1478 
1479  if (v->field_mode) {
1480  if (!v->numref)
1481  // REFFIELD determines if the last field or the second-last field is
1482  // to be used as reference
1483  opposite = 1 - v->reffield;
1484  else {
1485  if (num_samefield <= num_oppfield)
1486  opposite = 1 - pred_flag;
1487  else
1488  opposite = pred_flag;
1489  }
1490  } else
1491  opposite = 0;
1492  if (opposite) {
1493  if (a_valid && !a_f) {
1494  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1495  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1496  }
1497  if (b_valid && !b_f) {
1498  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1499  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1500  }
1501  if (c_valid && !c_f) {
1502  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1503  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1504  }
1505  v->mv_f[dir][xy + v->blocks_off] = 1;
1506  v->ref_field_type[dir] = !v->cur_field_type;
1507  } else {
1508  if (a_valid && a_f) {
1509  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1510  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1511  }
1512  if (b_valid && b_f) {
1513  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1514  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1515  }
1516  if (c_valid && c_f) {
1517  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1518  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1519  }
1520  v->mv_f[dir][xy + v->blocks_off] = 0;
1521  v->ref_field_type[dir] = v->cur_field_type;
1522  }
1523 
1524  if (a_valid) {
1525  px = field_predA[0];
1526  py = field_predA[1];
1527  } else if (c_valid) {
1528  px = field_predC[0];
1529  py = field_predC[1];
1530  } else if (b_valid) {
1531  px = field_predB[0];
1532  py = field_predB[1];
1533  } else {
1534  px = 0;
1535  py = 0;
1536  }
1537 
1538  if (num_samefield + num_oppfield > 1) {
1539  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1540  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1541  }
1542 
1543  /* Pullback MV as specified in 8.3.5.3.4 */
1544  if (!v->field_mode) {
1545  int qx, qy, X, Y;
1546  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1547  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1548  X = (s->mb_width << 6) - 4;
1549  Y = (s->mb_height << 6) - 4;
1550  if (mv1) {
1551  if (qx + px < -60) px = -60 - qx;
1552  if (qy + py < -60) py = -60 - qy;
1553  } else {
1554  if (qx + px < -28) px = -28 - qx;
1555  if (qy + py < -28) py = -28 - qy;
1556  }
1557  if (qx + px > X) px = X - qx;
1558  if (qy + py > Y) py = Y - qy;
1559  }
1560 
1561  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1562  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1563  hybridmv_thresh = 32;
1564  if (a_valid && c_valid) {
1565  if (is_intra[xy - wrap])
1566  sum = FFABS(px) + FFABS(py);
1567  else
1568  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1569  if (sum > hybridmv_thresh) {
1570  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1571  px = field_predA[0];
1572  py = field_predA[1];
1573  } else {
1574  px = field_predC[0];
1575  py = field_predC[1];
1576  }
1577  } else {
1578  if (is_intra[xy - 1])
1579  sum = FFABS(px) + FFABS(py);
1580  else
1581  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1582  if (sum > hybridmv_thresh) {
1583  if (get_bits1(&s->gb)) {
1584  px = field_predA[0];
1585  py = field_predA[1];
1586  } else {
1587  px = field_predC[0];
1588  py = field_predC[1];
1589  }
1590  }
1591  }
1592  }
1593  }
1594 
1595  if (v->field_mode && v->numref)
1596  r_y >>= 1;
1597  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1598  y_bias = 1;
1599  /* store MV using signed modulus of MV range defined in 4.11 */
1600  s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1601  s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1602  if (mv1) { /* duplicate motion data for 1-MV block */
1603  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1604  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1605  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1606  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1607  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1608  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1609  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1610  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1611  }
1612 }
1613 
/**
 * Predict and set motion vector for interlaced frame picture MBs.
 * Collects candidate predictors A (left), B (above) and C (above-right,
 * or above-left at the right picture edge), averaging field/frame MV
 * mismatches, then selects the prediction by median or field-dominance
 * rules and stores the MV (modulo the signed MV range) into s->mv and
 * motion_val, duplicating over the 8x8 entries per the MV block type.
 *
 * @param n        block index (0..3)
 * @param dmv_x/y  decoded MV differentials
 * @param mvn      MV block type: 1 = 1-MV MB, 2 = 2-field-MV MB, else 4-MV
 * @param r_x/r_y  MV range for the signed-modulus wrap
 * @param is_intra per-block intra flags (indexed by block position)
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same field, 1: opposite field
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra blocks carry no motion: zero both lists' entries */
        s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
        s->current_picture.f.motion_val[1][xy][0] = 0;
        s->current_picture.f.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.f.motion_val[0][xy + 1][0]        = 0;
            s->current_picture.f.motion_val[0][xy + 1][1]        = 0;
            s->current_picture.f.motion_val[0][xy + wrap][0]     = 0;
            s->current_picture.f.motion_val[0][xy + wrap][1]     = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.f.motion_val[1][xy + 1][0]        = 0;
            s->current_picture.f.motion_val[1][xy + 1][1]        = 0;
            s->current_picture.f.motion_val[1][xy + wrap][0]     = 0;
            s->current_picture.f.motion_val[1][xy + wrap][1]     = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset toward the other field of the neighbour MB */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A (left neighbour) */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
            A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
                    + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
                    + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left-column blocks of an intra left MB give no predictor */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            /* B from the MB above */
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj   = n | 2;
                pos_b   = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    /* candidate has field MV, current has frame MV: average fields */
                    B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                /* C from the MB above-right */
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj   = 2;
                    pos_c   = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* at the right picture edge, take C from above-left instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj   = 3;
                            pos_c   = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB predict from the MB's own top row */
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->current_picture.f.motion_val[0][pos_b][0];
        B[1]    = s->current_picture.f.motion_val[0][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->current_picture.f.motion_val[0][pos_c][0];
        C[1]    = s->current_picture.f.motion_val[0][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median / single-predictor selection */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: choose by field dominance.
         * Bit 2 of the (quad-pel) y component encodes the field parity. */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                }
                /* NOTE(review): unlike the branch above, this one has no final
                 * "else px = py = 0" — it looks unreachable with total_valid == 2,
                 * but confirm px/py cannot stay uninitialized here. */
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.f.motion_val[0][xy + 1    ][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + 1    ][1] = s->current_picture.f.motion_val[0][xy][1];
        s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
        s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
        s->mv[0][n + 1][0] = s->mv[0][n][0];
        s->mv[0][n + 1][1] = s->mv[0][n][1];
    }
}
1839 
1842 static void vc1_interp_mc(VC1Context *v)
1843 {
1844  MpegEncContext *s = &v->s;
1845  DSPContext *dsp = &v->s.dsp;
1846  uint8_t *srcY, *srcU, *srcV;
1847  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1848  int off, off_uv;
1849  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1850 
1851  if (!v->field_mode && !v->s.next_picture.f.data[0])
1852  return;
1853 
1854  mx = s->mv[1][0][0];
1855  my = s->mv[1][0][1];
1856  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1857  uvmy = (my + ((my & 3) == 3)) >> 1;
1858  if (v->field_mode) {
1859  if (v->cur_field_type != v->ref_field_type[1])
1860  my = my - 2 + 4 * v->cur_field_type;
1861  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1862  }
1863  if (v->fastuvmc) {
1864  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1865  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1866  }
1867  srcY = s->next_picture.f.data[0];
1868  srcU = s->next_picture.f.data[1];
1869  srcV = s->next_picture.f.data[2];
1870 
1871  src_x = s->mb_x * 16 + (mx >> 2);
1872  src_y = s->mb_y * 16 + (my >> 2);
1873  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1874  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1875 
1876  if (v->profile != PROFILE_ADVANCED) {
1877  src_x = av_clip( src_x, -16, s->mb_width * 16);
1878  src_y = av_clip( src_y, -16, s->mb_height * 16);
1879  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1880  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1881  } else {
1882  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1883  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1884  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1885  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1886  }
1887 
1888  srcY += src_y * s->linesize + src_x;
1889  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1890  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1891 
1892  if (v->field_mode && v->ref_field_type[1]) {
1893  srcY += s->current_picture_ptr->f.linesize[0];
1894  srcU += s->current_picture_ptr->f.linesize[1];
1895  srcV += s->current_picture_ptr->f.linesize[2];
1896  }
1897 
1898  /* for grayscale we should not try to read from unknown area */
1899  if (s->flags & CODEC_FLAG_GRAY) {
1900  srcU = s->edge_emu_buffer + 18 * s->linesize;
1901  srcV = s->edge_emu_buffer + 18 * s->linesize;
1902  }
1903 
1904  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
1905  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1906  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1907  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1908 
1909  srcY -= s->mspel * (1 + s->linesize);
1911  17 + s->mspel * 2, 17 + s->mspel * 2,
1912  src_x - s->mspel, src_y - s->mspel,
1913  s->h_edge_pos, v_edge_pos);
1914  srcY = s->edge_emu_buffer;
1915  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
1916  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1917  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
1918  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1919  srcU = uvbuf;
1920  srcV = uvbuf + 16;
1921  /* if we deal with range reduction we need to scale source blocks */
1922  if (v->rangeredfrm) {
1923  int i, j;
1924  uint8_t *src, *src2;
1925 
1926  src = srcY;
1927  for (j = 0; j < 17 + s->mspel * 2; j++) {
1928  for (i = 0; i < 17 + s->mspel * 2; i++)
1929  src[i] = ((src[i] - 128) >> 1) + 128;
1930  src += s->linesize;
1931  }
1932  src = srcU;
1933  src2 = srcV;
1934  for (j = 0; j < 9; j++) {
1935  for (i = 0; i < 9; i++) {
1936  src[i] = ((src[i] - 128) >> 1) + 128;
1937  src2[i] = ((src2[i] - 128) >> 1) + 128;
1938  }
1939  src += s->uvlinesize;
1940  src2 += s->uvlinesize;
1941  }
1942  }
1943  srcY += s->mspel * (1 + s->linesize);
1944  }
1945 
1946  if (v->field_mode && v->cur_field_type) {
1947  off = s->current_picture_ptr->f.linesize[0];
1948  off_uv = s->current_picture_ptr->f.linesize[1];
1949  } else {
1950  off = 0;
1951  off_uv = 0;
1952  }
1953 
1954  if (s->mspel) {
1955  dxy = ((my & 3) << 2) | (mx & 3);
1956  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
1957  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
1958  srcY += s->linesize * 8;
1959  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
1960  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1961  } else { // hpel mc
1962  dxy = (my & 2) | ((mx & 2) >> 1);
1963 
1964  if (!v->rnd)
1965  dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1966  else
1967  dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1968  }
1969 
1970  if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
1972  uvmx = (uvmx & 3) << 1;
1973  uvmy = (uvmy & 3) << 1;
1974  if (!v->rnd) {
1975  dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1976  dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1977  } else {
1978  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1979  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1980  }
1981 }
1982 
1983 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1984 {
1985  int n = bfrac;
1986 
1987 #if B_FRACTION_DEN==256
1988  if (inv)
1989  n -= 256;
1990  if (!qs)
1991  return 2 * ((value * n + 255) >> 9);
1992  return (value * n + 128) >> 8;
1993 #else
1994  if (inv)
1995  n -= B_FRACTION_DEN;
1996  if (!qs)
1997  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
1998  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
1999 #endif
2000 }
2001 
/** Perform motion compensation for a B-frame macroblock: forward,
 *  backward, interpolated or direct mode, via vc1_mc_1mv()/vc1_interp_mc().
 *  When intensity compensation is active, the MV mode is saved/restored
 *  around the MC calls so the plain (non-IC) tables are used where needed.
 */
static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
                            int direct, int mode)
{
    if (v->use_ic) {
        v->mv_mode2 = v->mv_mode;
        /* NOTE(review): a statement appears to have been lost here in
         * extraction — upstream sets v->mv_mode to the intensity-comp
         * mode at this point; verify against the original source. */
    }
    if (direct) {
        /* direct mode: forward MC plus averaged backward interpolation */
        vc1_mc_1mv(v, 0);
        vc1_interp_mc(v);
        if (v->use_ic)
            v->mv_mode = v->mv_mode2;
        return;
    }
    if (mode == BMV_TYPE_INTERPOLATED) {
        /* explicit interpolated mode: same MC pair as direct */
        vc1_mc_1mv(v, 0);
        vc1_interp_mc(v);
        if (v->use_ic)
            v->mv_mode = v->mv_mode2;
        return;
    }

    /* single-direction MC; backward prediction never uses IC tables */
    if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
        v->mv_mode = v->mv_mode2;
    vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
    if (v->use_ic)
        v->mv_mode = v->mv_mode2;
}
2032 
/** Predict the forward and backward motion vectors of a B-frame macroblock
 *  (progressive path) and store the result in s->mv[dir][0] and the
 *  current picture's motion_val arrays.
 *
 * @param v      VC-1 decoder context
 * @param dmv_x  per-direction MV differentials (x); scaled to quad-pel in place
 * @param dmv_y  per-direction MV differentials (y); scaled to quad-pel in place
 * @param direct non-zero for direct mode: MVs come purely from scaling the
 *               co-located anchor MV, no differential is applied
 * @param mvtype BMV_TYPE_* prediction mode for non-direct macroblocks
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if (s->mb_intra) {
        /* intra MB: zero both direction MVs and bail out */
        s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
        s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
        return;
    }
    if (!v->field_mode) {
        /* derive both direction predictors by scaling the co-located MV of
         * the next (anchor) picture with the B-fraction */
        s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
        s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
        s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
        s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

        /* Pullback predicted motion vectors as specified in 8.4.5.4 */
        s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
        s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
        s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
        s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    }
    if (direct) {
        /* direct mode: scaled anchor MVs are final */
        s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
        s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* spatial median prediction from neighbours:
         * C = left, A = top, B = top-right (top-left on the last MB column) */
        C = s->current_picture.f.motion_val[0][xy - 2];
        A = s->current_picture.f.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                /* simple/main profile: half-pel units (<< 5) */
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                /* advanced profile: quarter-pel units (<< 6) */
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: intentionally disabled dead code — the leading 0 in the
         * condition keeps hybrid prediction off for B-frames here */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* same procedure for the backward direction (list 1) */
        C = s->current_picture.f.motion_val[1][xy - 2];
        A = s->current_picture.f.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: intentionally disabled dead code (leading 0), as above */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    /* NOTE(review): the final stores use xy without v->blocks_off, unlike
     * the intra/direct paths above — presumably fine for the progressive
     * path where blocks_off is 0; verify for field pictures. */
    s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
}
2245 
/** Predict B-frame motion vectors for interlaced-field pictures.
 *  Direct mode scales the co-located anchor MV and picks the reference
 *  field by majority vote of the anchor's field flags; the other modes
 *  delegate to vc1_pred_mv() per direction.
 *
 * @param v         VC-1 decoder context
 * @param n         block index within the MB (3 = whole-MB call)
 * @param dmv_x     per-direction MV differentials (x)
 * @param dmv_y     per-direction MV differentials (y)
 * @param mv1       non-zero for 1-MV (whole MB) mode
 * @param pred_flag per-direction field-prediction flags
 */
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            /* scale the co-located anchor MV for both directions */
            s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote over the four luma blocks' opposite-field flags */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            /* co-located MB was intra: zero MVs, same-field reference */
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        for (k = 0; k < 4; k++) {
            /* propagate the MB-level MV and field flag to all four blocks */
            s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* predict both directions as whole-MB 1-MV */
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            /* also refresh the unused (forward) direction predictor */
            vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            /* also refresh the unused (backward) direction predictor */
            vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
2302 
2312 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2313  int16_t **dc_val_ptr, int *dir_ptr)
2314 {
2315  int a, b, c, wrap, pred, scale;
2316  int16_t *dc_val;
2317  static const uint16_t dcpred[32] = {
2318  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2319  114, 102, 93, 85, 79, 73, 68, 64,
2320  60, 57, 54, 51, 49, 47, 45, 43,
2321  41, 39, 38, 37, 35, 34, 33
2322  };
2323 
2324  /* find prediction - wmv3_dc_scale always used here in fact */
2325  if (n < 4) scale = s->y_dc_scale;
2326  else scale = s->c_dc_scale;
2327 
2328  wrap = s->block_wrap[n];
2329  dc_val = s->dc_val[0] + s->block_index[n];
2330 
2331  /* B A
2332  * C X
2333  */
2334  c = dc_val[ - 1];
2335  b = dc_val[ - 1 - wrap];
2336  a = dc_val[ - wrap];
2337 
2338  if (pq < 9 || !overlap) {
2339  /* Set outer values */
2340  if (s->first_slice_line && (n != 2 && n != 3))
2341  b = a = dcpred[scale];
2342  if (s->mb_x == 0 && (n != 1 && n != 3))
2343  b = c = dcpred[scale];
2344  } else {
2345  /* Set outer values */
2346  if (s->first_slice_line && (n != 2 && n != 3))
2347  b = a = 0;
2348  if (s->mb_x == 0 && (n != 1 && n != 3))
2349  b = c = 0;
2350  }
2351 
2352  if (abs(a - b) <= abs(b - c)) {
2353  pred = c;
2354  *dir_ptr = 1; // left
2355  } else {
2356  pred = a;
2357  *dir_ptr = 0; // top
2358  }
2359 
2360  /* update predictor */
2361  *dc_val_ptr = &dc_val[0];
2362  return pred;
2363 }
2364 
2365 
2377 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2378  int a_avail, int c_avail,
2379  int16_t **dc_val_ptr, int *dir_ptr)
2380 {
2381  int a, b, c, wrap, pred;
2382  int16_t *dc_val;
2383  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2384  int q1, q2 = 0;
2385  int dqscale_index;
2386 
2387  wrap = s->block_wrap[n];
2388  dc_val = s->dc_val[0] + s->block_index[n];
2389 
2390  /* B A
2391  * C X
2392  */
2393  c = dc_val[ - 1];
2394  b = dc_val[ - 1 - wrap];
2395  a = dc_val[ - wrap];
2396  /* scale predictors if needed */
2397  q1 = s->current_picture.f.qscale_table[mb_pos];
2398  dqscale_index = s->y_dc_scale_table[q1] - 1;
2399  if (dqscale_index < 0)
2400  return 0;
2401  if (c_avail && (n != 1 && n != 3)) {
2402  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2403  if (q2 && q2 != q1)
2404  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2405  }
2406  if (a_avail && (n != 2 && n != 3)) {
2407  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2408  if (q2 && q2 != q1)
2409  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2410  }
2411  if (a_avail && c_avail && (n != 3)) {
2412  int off = mb_pos;
2413  if (n != 1)
2414  off--;
2415  if (n != 2)
2416  off -= s->mb_stride;
2417  q2 = s->current_picture.f.qscale_table[off];
2418  if (q2 && q2 != q1)
2419  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2420  }
2421 
2422  if (a_avail && c_avail) {
2423  if (abs(a - b) <= abs(b - c)) {
2424  pred = c;
2425  *dir_ptr = 1; // left
2426  } else {
2427  pred = a;
2428  *dir_ptr = 0; // top
2429  }
2430  } else if (a_avail) {
2431  pred = a;
2432  *dir_ptr = 0; // top
2433  } else if (c_avail) {
2434  pred = c;
2435  *dir_ptr = 1; // left
2436  } else {
2437  pred = 0;
2438  *dir_ptr = 1; // left
2439  }
2440 
2441  /* update predictor */
2442  *dc_val_ptr = &dc_val[0];
2443  return pred;
2444 }
2445  // Block group
2447 
2454 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2455  uint8_t **coded_block_ptr)
2456 {
2457  int xy, wrap, pred, a, b, c;
2458 
2459  xy = s->block_index[n];
2460  wrap = s->b8_stride;
2461 
2462  /* B C
2463  * A X
2464  */
2465  a = s->coded_block[xy - 1 ];
2466  b = s->coded_block[xy - 1 - wrap];
2467  c = s->coded_block[xy - wrap];
2468 
2469  if (b == c) {
2470  pred = a;
2471  } else {
2472  pred = c;
2473  }
2474 
2475  /* store value */
2476  *coded_block_ptr = &s->coded_block[xy];
2477 
2478  return pred;
2479 }
2480 
/** Decode one AC coefficient as a (last, run, level) triple.
 *  Handles the regular VLC path plus the three escape modes (delta level,
 *  delta run, and fully explicit fixed-length coding).
 *
 * @param v         VC-1 decoder context
 * @param[out] last non-zero if this is the last coefficient of the block
 * @param[out] skip zero-run length preceding the coefficient
 * @param[out] value signed coefficient level
 * @param codingset index selecting the AC coding set (tables)
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* regular (non-escape) symbol: table lookup plus sign bit;
         * bitstream exhaustion forces "last" to terminate the block */
        run = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if (get_bits1(gb))
            level = -level;
    } else {
        escape = decode210(gb);
        if (escape != 2) {
            /* escape modes 0/1: re-read a regular symbol, then extend
             * either the level (mode 0) or the run (mode 1) by a delta */
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if (get_bits1(gb))
                level = -level;
        } else {
            /* escape mode 3: explicit fixed-length coding; the field
             * widths are read once per picture and cached in the context */
            int sign;
            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run = get_bits(gb, v->s.esc3_run_length);
            sign = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if (sign)
                level = -level;
        }
    }

    *last = lst;
    *skip = run;
    *value = level;
}
2548 
/** Decode one intra block (simple/main profile I-frame path): DC
 *  differential with prediction, AC coefficients with optional AC
 *  prediction, and final coefficient scaling.
 *
 * @param v         VC-1 decoder context
 * @param block     output coefficient block (zigzag de-scanned in place)
 * @param n         block index inside the macroblock (0..3 luma, 4/5 chroma)
 * @param coded     non-zero if AC coefficients are present in the bitstream
 * @param codingset AC coding-set index
 * @return 0 on success, -1 on an illegal DC VLC
 */
static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
                              int coded, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;

    /* Get DC differential */
    /* NOTE(review): the DC-differential VLC reads (luma vs chroma DC
     * tables into dcdiff) appear to have been lost in extraction from
     * both branches below — verify against the original source. */
    if (n < 4) {
    } else {
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        if (dcdiff == 119 /* ESC index value */) {
            /* TODO: Optimize */
            if (v->pq == 1) dcdiff = get_bits(gb, 10);
            else if (v->pq == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* low quantizers carry extra differential precision bits */
            if (v->pq == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (v->pq == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }
    /* Skip ? */
    if (!coded) {
        goto not_coded;
    }

    // AC Decoding
    i = 1;

    {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int scale;
        int k;

        scale = v->pq * 2 + v->halfpq;

        /* zigzag scan depends on the AC prediction direction */
        if (v->s.ac_pred) {
            if (!dc_pred_dir)
                zz_table = v->zz_8x8[2];
            else
                zz_table = v->zz_8x8[3];
        } else
            zz_table = v->zz_8x8[1];

        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;
        /* point at the neighbour's saved AC row/column */
        if (dc_pred_dir) // left
            ac_val -= 16;
        else // top
            ac_val -= 16 * s->block_wrap[n];

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++)
                    block[k << v->left_blk_sh] += ac_val[k];
            } else { // top
                for (k = 1; k < 8; k++)
                    block[k << v->top_blk_sh] += ac_val[k + 8];
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -v->pq : v->pq;
            }

        if (s->ac_pred) i = 63;
    }

not_coded:
    if (!coded) {
        int k, scale;
        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;

        i = 0;
        scale = v->pq * 2 + v->halfpq;
        memset(ac_val2, 0, 16 * 2);
        /* copy the neighbour's saved AC coefficients when predicting */
        if (dc_pred_dir) { // left
            ac_val -= 16;
            if (s->ac_pred)
                memcpy(ac_val2, ac_val, 8 * 2);
        } else { // top
            ac_val -= 16 * s->block_wrap[n];
            if (s->ac_pred)
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { //left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
2710 
                                  int coded, int codingset, int mquant)
{
    /* Advanced-profile intra block decode: like vc1_decode_i_block() but
     * with per-MB quantizer (mquant), availability-aware DC/AC prediction
     * and quantizer rescaling of the predictors.
     * Returns 0 on success, -1 on an illegal DC VLC, AVERROR_INVALIDDATA
     * on an invalid quantizer combination. */
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Get DC differential */
    /* NOTE(review): the DC-differential VLC reads (luma vs chroma DC
     * tables into dcdiff) appear to have been lost in extraction from
     * both branches below — verify against the original source. */
    if (n < 4) {
    } else {
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        if (dcdiff == 119 /* ESC index value */) {
            /* TODO: Optimize */
            if (mquant == 1) dcdiff = get_bits(gb, 10);
            else if (mquant == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* low quantizers carry extra differential precision bits */
            if (mquant == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (mquant == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all */
    if (!a_avail && !c_avail)
        use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);

    /* point at the neighbour's saved AC row/column */
    if (dc_pred_dir) // left
        ac_val -= 16;
    else // top
        ac_val -= 16 * s->block_wrap[n];

    /* determine the neighbour's quantizer (q2) for predictor rescaling;
     * blocks 1/2/3 may have their neighbour inside the same MB (q2 = q1) */
    q1 = s->current_picture.f.qscale_table[mb_pos];
    if ( dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
    if ( dc_pred_dir && n == 1)
        q2 = q1;
    if (!dc_pred_dir && n == 2)
        q2 = q1;
    if (n == 3)
        q2 = q1;

    if (coded) {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int k;

        /* zigzag scan depends on AC prediction direction and frame coding
         * mode (interlaced frames use their own scan) */
        if (v->s.ac_pred) {
            if (!use_pred && v->fcm == ILACE_FRAME) {
                zz_table = v->zzi_8x8;
            } else {
                if (!dc_pred_dir) // top
                    zz_table = v->zz_8x8[2];
                else // left
                    zz_table = v->zz_8x8[3];
            }
        } else {
            if (v->fcm != ILACE_FRAME)
                zz_table = v->zz_8x8[1];
            else
                zz_table = v->zzi_8x8;
        }

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            /* scale predictors if needed*/
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;

                if (q1 < 1)
                    return AVERROR_INVALIDDATA;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { // top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if (dc_pred_dir) { //left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                } else { //top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k ] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if (use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        /* copy (and possibly rescale) the neighbour's AC coefficients */
        if (dc_pred_dir) { // left
            if (use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else { // top
            if (use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
2922 
/* NOTE(review): this chunk is a doxygen-HTML extraction; lines that carried
 * hyperlinks were dropped.  The function header and signature (original
 * lines 2922-2931) and the two get_vlc2() DC-differential reads (2958, 2960)
 * are missing here.  All surviving text is kept byte-identical; only
 * comments were added. */
/* Decode one intra-coded block of an advanced-profile picture:
 * read the DC differential (escape-coded against mquant), apply DC
 * prediction, then decode the AC run/level pairs and optionally apply
 * AC prediction from the left or top neighbour.
 * Returns 0 on success, -1 on an illegal DC VLC, AVERROR_INVALIDDATA when
 * the derived predictor quantizer scale would be invalid. */
2932  int coded, int mquant, int codingset)
2933 {
2934  GetBitContext *gb = &v->s.gb;
2935  MpegEncContext *s = &v->s;
2936  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2937  int i;
2938  int16_t *dc_val;
2939  int16_t *ac_val, *ac_val2;
2940  int dcdiff;
2941  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2942  int a_avail = v->a_avail, c_avail = v->c_avail;
2943  int use_pred = s->ac_pred;
2944  int scale;
2945  int q1, q2 = 0;
2946 
2947  s->dsp.clear_block(block);
2948 
2949  /* XXX: Guard against dumb values of mquant */
2950  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
2951 
2952  /* Set DC scale - y and c use the same */
2953  s->y_dc_scale = s->y_dc_scale_table[mquant];
2954  s->c_dc_scale = s->c_dc_scale_table[mquant];
2955 
2956  /* Get DC differential */
2957  if (n < 4) {
      /* NOTE(review): the luma and chroma DC-VLC reads (original lines
       * 2958 and 2960, presumably get_vlc2() into dcdiff) were lost in
       * extraction — restore from the upstream source. */
2959  } else {
2961  }
2962  if (dcdiff < 0) {
2963  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2964  return -1;
2965  }
2966  if (dcdiff) {
      /* Escape-coded DC: the raw size of the differential depends on mquant. */
2967  if (dcdiff == 119 /* ESC index value */) {
2968  /* TODO: Optimize */
2969  if (mquant == 1) dcdiff = get_bits(gb, 10);
2970  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2971  else dcdiff = get_bits(gb, 8);
2972  } else {
2973  if (mquant == 1)
2974  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2975  else if (mquant == 2)
2976  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2977  }
      /* Sign bit follows the magnitude. */
2978  if (get_bits1(gb))
2979  dcdiff = -dcdiff;
2980  }
2981 
2982  /* Prediction */
2983  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2984  *dc_val = dcdiff;
2985 
2986  /* Store the quantized DC coeff, used for prediction */
2987 
2988  if (n < 4) {
2989  block[0] = dcdiff * s->y_dc_scale;
2990  } else {
2991  block[0] = dcdiff * s->c_dc_scale;
2992  }
2993 
2994  //AC Decoding
2995  i = 1;
2996 
2997  /* check if AC is needed at all and adjust direction if needed */
2998  if (!a_avail) dc_pred_dir = 1;
2999  if (!c_avail) dc_pred_dir = 0;
3000  if (!a_avail && !c_avail) use_pred = 0;
     /* Each block keeps 16 int16_t of saved AC predictors: [0..7] for the
      * left column, [8..15] for the top row. */
3001  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3002  ac_val2 = ac_val;
3003 
3004  scale = mquant * 2 + v->halfpq;
3005 
3006  if (dc_pred_dir) //left
3007  ac_val -= 16;
3008  else //top
3009  ac_val -= 16 * s->block_wrap[n];
3010 
     /* q1/q2: quantizers of the current and predictor macroblocks; when they
      * differ, the saved AC predictors are rescaled below. */
3011  q1 = s->current_picture.f.qscale_table[mb_pos];
3012  if (dc_pred_dir && c_avail && mb_pos)
3013  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3014  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3015  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
     /* Blocks 1-3 predict from within the same macroblock, so the predictor
      * quantizer equals the current one. */
3016  if ( dc_pred_dir && n == 1)
3017  q2 = q1;
3018  if (!dc_pred_dir && n == 2)
3019  q2 = q1;
3020  if (n == 3) q2 = q1;
3021 
3022  if (coded) {
3023  int last = 0, skip, value;
3024  int k;
3025 
     /* Run/level loop: scan order depends on the frame coding mode and,
      * for interlaced frames with AC prediction, on the prediction
      * direction. */
3026  while (!last) {
3027  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3028  i += skip;
3029  if (i > 63)
3030  break;
3031  if (v->fcm == PROGRESSIVE)
3032  block[v->zz_8x8[0][i++]] = value;
3033  else {
3034  if (use_pred && (v->fcm == ILACE_FRAME)) {
3035  if (!dc_pred_dir) // top
3036  block[v->zz_8x8[2][i++]] = value;
3037  else // left
3038  block[v->zz_8x8[3][i++]] = value;
3039  } else {
3040  block[v->zzi_8x8[i++]] = value;
3041  }
3042  }
3043  }
3044 
3045  /* apply AC prediction if needed */
3046  if (use_pred) {
3047  /* scale predictors if needed*/
3048  if (q2 && q1 != q2) {
3049  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3050  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3051 
3052  if (q1 < 1)
3053  return AVERROR_INVALIDDATA;
3054  if (dc_pred_dir) { // left
3055  for (k = 1; k < 8; k++)
3056  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3057  } else { //top
3058  for (k = 1; k < 8; k++)
3059  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3060  }
3061  } else {
3062  if (dc_pred_dir) { // left
3063  for (k = 1; k < 8; k++)
3064  block[k << v->left_blk_sh] += ac_val[k];
3065  } else { // top
3066  for (k = 1; k < 8; k++)
3067  block[k << v->top_blk_sh] += ac_val[k + 8];
3068  }
3069  }
3070  }
3071  /* save AC coeffs for further prediction */
3072  for (k = 1; k < 8; k++) {
3073  ac_val2[k ] = block[k << v->left_blk_sh];
3074  ac_val2[k + 8] = block[k << v->top_blk_sh];
3075  }
3076 
3077  /* scale AC coeffs */
3078  for (k = 1; k < 64; k++)
3079  if (block[k]) {
3080  block[k] *= scale;
3081  if (!v->pquantizer)
3082  block[k] += (block[k] < 0) ? -mquant : mquant;
3083  }
3084 
3085  if (use_pred) i = 63;
3086  } else { // no AC coeffs
3087  int k;
3088 
     /* Clear both predictor rows, then (if predicting) copy and rescale
      * the neighbour's predictors so the next block sees correct values. */
3089  memset(ac_val2, 0, 16 * 2);
3090  if (dc_pred_dir) { // left
3091  if (use_pred) {
3092  memcpy(ac_val2, ac_val, 8 * 2);
3093  if (q2 && q1 != q2) {
3094  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3095  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3096  if (q1 < 1)
3097  return AVERROR_INVALIDDATA;
3098  for (k = 1; k < 8; k++)
3099  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3100  }
3101  }
3102  } else { // top
3103  if (use_pred) {
3104  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3105  if (q2 && q1 != q2) {
3106  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3107  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3108  if (q1 < 1)
3109  return AVERROR_INVALIDDATA;
3110  for (k = 1; k < 8; k++)
3111  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3112  }
3113  }
3114  }
3115 
3116  /* apply AC prediction if needed */
3117  if (use_pred) {
3118  if (dc_pred_dir) { // left
3119  for (k = 1; k < 8; k++) {
3120  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3121  if (!v->pquantizer && block[k << v->left_blk_sh])
3122  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3123  }
3124  } else { // top
3125  for (k = 1; k < 8; k++) {
3126  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3127  if (!v->pquantizer && block[k << v->top_blk_sh])
3128  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3129  }
3130  }
3131  i = 63;
3132  }
3133  }
3134  s->block_last_index[n] = i;
3135 
3136  return 0;
3137 }
3138 
/* Decode one inter-coded block of a P picture: select the block transform
 * type (8x8, two 8x4, two 4x8, or four 4x4), decode the coded sub-blocks'
 * AC coefficients, inverse-transform and add them to the prediction in dst.
 * Returns the coded-subblock pattern ("pat") for the caller's CBP tracking;
 * the chosen transform type is OR-ed into *ttmb_out.
 * NOTE(review): two hyperlinked lines were lost in the extraction — the
 * get_vlc2() read of ttblk when ttmb == -1 (original line 3157) and the
 * 4x4 progressive zig-zag index lookup (original line 3223).  The surviving
 * code is byte-identical; only comments were added. */
3141 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3142  int mquant, int ttmb, int first_block,
3143  uint8_t *dst, int linesize, int skip_block,
3144  int *ttmb_out)
3145 {
3146  MpegEncContext *s = &v->s;
3147  GetBitContext *gb = &s->gb;
3148  int i, j;
3149  int subblkpat = 0;
3150  int scale, off, idx, last, skip, value;
3151  int ttblk = ttmb & 7;
3152  int pat = 0;
3153 
3154  s->dsp.clear_block(block);
3155 
3156  if (ttmb == -1) {
      /* NOTE(review): missing line 3157 — presumably
       * ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(...)]; confirm
       * against the upstream source. */
3158  }
3159  if (ttblk == TT_4X4) {
3160  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3161  }
3162  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3163  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3164  || (!v->res_rtm_flag && !first_block))) {
3165  subblkpat = decode012(gb);
3166  if (subblkpat)
3167  subblkpat ^= 3; // swap decoded pattern bits
3168  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3169  ttblk = TT_8X4;
3170  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3171  ttblk = TT_4X8;
3172  }
3173  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3174 
3175  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3176  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3177  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3178  ttblk = TT_8X4;
3179  }
3180  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3181  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3182  ttblk = TT_4X8;
3183  }
3184  switch (ttblk) {
3185  case TT_8X8:
3186  pat = 0xF;
3187  i = 0;
3188  last = 0;
      /* Single 8x8 transform: run/level loop over the whole block, then one
       * inverse transform (DC-only fast path when just one coeff decoded). */
3189  while (!last) {
3190  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3191  i += skip;
3192  if (i > 63)
3193  break;
3194  if (!v->fcm)
3195  idx = v->zz_8x8[0][i++];
3196  else
3197  idx = v->zzi_8x8[i++];
3198  block[idx] = value * scale;
3199  if (!v->pquantizer)
3200  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3201  }
3202  if (!skip_block) {
3203  if (i == 1)
3204  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3205  else {
3206  v->vc1dsp.vc1_inv_trans_8x8(block);
3207  s->dsp.add_pixels_clamped(block, dst, linesize);
3208  }
3209  }
3210  break;
3211  case TT_4X4:
3212  pat = ~subblkpat & 0xF;
3213  for (j = 0; j < 4; j++) {
3214  last = subblkpat & (1 << (3 - j));
3215  i = 0;
3216  off = (j & 1) * 4 + (j & 2) * 16;
3217  while (!last) {
3218  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3219  i += skip;
3220  if (i > 15)
3221  break;
3222  if (!v->fcm)
       /* NOTE(review): missing line 3223 — presumably
        * idx = ff_vc1_simple_progressive_4x4_zz[i++]; confirm upstream. */
3224  else
3225  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3226  block[idx + off] = value * scale;
3227  if (!v->pquantizer)
3228  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3229  }
3230  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3231  if (i == 1)
3232  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3233  else
3234  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3235  }
3236  }
3237  break;
3238  case TT_8X4:
3239  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3240  for (j = 0; j < 2; j++) {
3241  last = subblkpat & (1 << (1 - j));
3242  i = 0;
3243  off = j * 32;
3244  while (!last) {
3245  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3246  i += skip;
3247  if (i > 31)
3248  break;
3249  if (!v->fcm)
3250  idx = v->zz_8x4[i++] + off;
3251  else
3252  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3253  block[idx] = value * scale;
3254  if (!v->pquantizer)
3255  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3256  }
3257  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3258  if (i == 1)
3259  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3260  else
3261  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3262  }
3263  }
3264  break;
3265  case TT_4X8:
3266  pat = ~(subblkpat * 5) & 0xF;
3267  for (j = 0; j < 2; j++) {
3268  last = subblkpat & (1 << (1 - j));
3269  i = 0;
3270  off = j * 4;
3271  while (!last) {
3272  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3273  i += skip;
3274  if (i > 31)
3275  break;
3276  if (!v->fcm)
3277  idx = v->zz_4x8[i++] + off;
3278  else
3279  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3280  block[idx] = value * scale;
3281  if (!v->pquantizer)
3282  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3283  }
3284  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3285  if (i == 1)
3286  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3287  else
3288  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3289  }
3290  }
3291  break;
3292  }
3293  if (ttmb_out)
3294  *ttmb_out |= ttblk << (n * 4);
3295  return pat;
3296 }
3297  // Macroblock group
3299 
/* NOTE(review): escape-value helper tables for differential-MV decoding;
 * presumably consumed by the GET_MVDATA()/GET_MQUANT() macros declared in
 * vc1.h (outside this chunk) — size_table gives extra-bit counts, and
 * offset_table the matching value offsets (2^k - 1).  Confirm upstream. */
3300 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3301 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3302 
/* NOTE(review): the function header/signature (original line 3303) was lost
 * in the extraction; from the body this is the per-block VERTICAL in-loop
 * deblocking filter for P frames (vc1_apply_p_v_loop_filter(VC1Context *v,
 * int block_num) upstream) — confirm against the original source.
 * It filters the edge between the block in the PREVIOUS macroblock row
 * (indices use s->mb_x - s->mb_stride) and the one below it, choosing the
 * 8-sample or 4-sample filter from intra flags, MV equality, and CBP bits,
 * then filters the internal horizontal edge when the block used a 4x4 or
 * 8x4 transform. */
3304 {
3305  MpegEncContext *s = &v->s;
3306  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3307  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3308  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3309  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3310  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3311  uint8_t *dst;
3312 
3313  if (block_num > 3) {
3314  dst = s->dest[block_num - 3];
3315  } else {
3316  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3317  }
     /* Skip the cross-MB edge on the last row except for the top luma
      * blocks (which still have a neighbour below inside the row). */
3318  if (s->mb_y != s->end_mb_y || block_num < 2) {
3319  int16_t (*mv)[2];
3320  int mv_stride;
3321 
3322  if (block_num > 3) {
3323  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3324  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3325  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3326  mv_stride = s->mb_stride;
3327  } else {
3328  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3329  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3330  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3331  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3332  mv_stride = s->b8_stride;
3333  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3334  }
3335 
     /* Intra on either side, or differing MVs, forces the full 8-pel
      * filter; otherwise the CBP bits select a 4-pel filter or nothing. */
3336  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3337  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3338  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3339  } else {
3340  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3341  if (idx == 3) {
3342  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3343  } else if (idx) {
3344  if (idx == 1)
3345  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3346  else
3347  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3348  }
3349  }
3350  }
3351 
     /* Internal edge at mid-block height, only present for transforms that
      * split the block horizontally. */
3352  dst -= 4 * linesize;
3353  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3354  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3355  idx = (block_cbp | (block_cbp >> 2)) & 3;
3356  if (idx == 3) {
3357  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3358  } else if (idx) {
3359  if (idx == 1)
3360  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3361  else
3362  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3363  }
3364  }
3365 }
3366 
/* NOTE(review): the function header/signature (original line 3367) was lost
 * in the extraction; from the body this is the per-block HORIZONTAL in-loop
 * deblocking filter for P frames (vc1_apply_p_h_loop_filter(VC1Context *v,
 * int block_num) upstream) — confirm against the original source.
 * Mirror of the vertical filter above, but operating on the macroblock one
 * position up-and-left (s->mb_x - 1 - s->mb_stride) and filtering vertical
 * edges with vc1_h_loop_filter8/4. */
3368 {
3369  MpegEncContext *s = &v->s;
3370  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3371  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3372  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3373  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3374  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3375  uint8_t *dst;
3376 
3377  if (block_num > 3) {
3378  dst = s->dest[block_num - 3] - 8 * linesize;
3379  } else {
3380  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3381  }
3382 
     /* Skip the cross-MB edge past the right picture border except for the
      * left-column blocks (0, 2) — !(block_num & 5). */
3383  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3384  int16_t (*mv)[2];
3385 
3386  if (block_num > 3) {
3387  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3388  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3389  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3390  } else {
3391  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3392  : (mb_cbp >> ((block_num + 1) * 4));
3393  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3394  : (mb_is_intra >> ((block_num + 1) * 4));
3395  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3396  }
3397  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3398  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3399  } else {
3400  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3401  if (idx == 5) {
3402  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3403  } else if (idx) {
3404  if (idx == 1)
3405  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3406  else
3407  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3408  }
3409  }
3410  }
3411 
     /* Internal vertical edge, only for transforms that split the block
      * vertically. */
3412  dst -= 4;
3413  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3414  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3415  idx = (block_cbp | (block_cbp >> 1)) & 5;
3416  if (idx == 5) {
3417  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3418  } else if (idx) {
3419  if (idx == 1)
3420  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3421  else
3422  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3423  }
3424  }
3425 }
3426 
/* NOTE(review): the function header/signature (original line 3427) and the
 * hyperlinked loop bodies (original lines 3433, 3440, 3444, 3446 — the calls
 * to the per-block V/H filters and the block-index update) were lost in the
 * extraction.  From the surviving structure this is the loop-filter driver
 * that runs both filters over all six blocks of a macroblock
 * (vc1_apply_p_loop_filter(VC1Context *v) upstream) — confirm against the
 * original source. */
3428 {
3429  MpegEncContext *s = &v->s;
3430  int i;
3431 
3432  for (i = 0; i < 6; i++) {
      /* NOTE(review): missing line 3433 — presumably
       * vc1_apply_p_v_loop_filter(v, i); */
3434  }
3435 
3436  /* V always precedes H, therefore we run H one MB before V;
3437  * at the end of a row, we catch up to complete the row */
3438  if (s->mb_x) {
3439  for (i = 0; i < 6; i++) {
      /* NOTE(review): missing line 3440 — presumably
       * vc1_apply_p_h_loop_filter(v, i); */
3441  }
3442  if (s->mb_x == s->mb_width - 1) {
3443  s->mb_x++;
      /* NOTE(review): missing line 3444 — presumably
       * ff_update_block_index(s); before the catch-up pass. */
3445  for (i = 0; i < 6; i++) {
      /* NOTE(review): missing line 3446 — presumably
       * vc1_apply_p_h_loop_filter(v, i); */
3447  }
3448  }
3449  }
3450 }
3451 
/* NOTE(review): the doc comment and signature (original lines 3452-3454)
 * were lost in the extraction; from the body this decodes one macroblock of
 * a progressive P frame (vc1_decode_p_mb(VC1Context *v) upstream) — confirm
 * against the original source.  Handles the four cases: 1MV coded, 1MV
 * skipped, 4MV coded, 4MV skipped; records per-MB CBP, transform-type and
 * intra masks for the deferred loop filter.  Two hyperlinked statements are
 * missing: original line 3492 (between the skipped-MB loop and vc1_pred_mv,
 * presumably the f.mb_type/qscale bookkeeping) and 3625 (the get_vlc2()
 * read of ttmb after "if (!v->ttmbf && coded_inter)"). */
3455 {
3456  MpegEncContext *s = &v->s;
3457  GetBitContext *gb = &s->gb;
3458  int i, j;
3459  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3460  int cbp; /* cbp decoding stuff */
3461  int mqdiff, mquant; /* MB quantization */
3462  int ttmb = v->ttfrm; /* MB Transform type */
3463 
3464  int mb_has_coeffs = 1; /* last_flag */
3465  int dmv_x, dmv_y; /* Differential MV components */
3466  int index, index1; /* LUT indexes */
3467  int val, sign; /* temp values */
3468  int first_block = 1;
3469  int dst_idx, off;
3470  int skipped, fourmv;
3471  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3472 
3473  mquant = v->pq; /* lossy initialization */
3474 
     /* MVTYPEMB and SKIPMB may be raw (one bit each) or carried in decoded
      * bitplanes. */
3475  if (v->mv_type_is_raw)
3476  fourmv = get_bits1(gb);
3477  else
3478  fourmv = v->mv_type_mb_plane[mb_pos];
3479  if (v->skip_is_raw)
3480  skipped = get_bits1(gb);
3481  else
3482  skipped = v->s.mbskip_table[mb_pos];
3483 
3484  if (!fourmv) { /* 1MV mode */
3485  if (!skipped) {
3486  GET_MVDATA(dmv_x, dmv_y);
3487 
3488  if (s->mb_intra) {
3489  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3490  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3491  }
      /* NOTE(review): missing line 3492 — presumably
       * s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA
       * : MB_TYPE_16x16; confirm upstream. */
3493  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3494 
3495  /* FIXME Set DC val for inter block ? */
3496  if (s->mb_intra && !mb_has_coeffs) {
3497  GET_MQUANT();
3498  s->ac_pred = get_bits1(gb);
3499  cbp = 0;
3500  } else if (mb_has_coeffs) {
3501  if (s->mb_intra)
3502  s->ac_pred = get_bits1(gb);
3503  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3504  GET_MQUANT();
3505  } else {
3506  mquant = v->pq;
3507  cbp = 0;
3508  }
3509  s->current_picture.f.qscale_table[mb_pos] = mquant;
3510 
3511  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3512  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3513  VC1_TTMB_VLC_BITS, 2);
3514  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3515  dst_idx = 0;
3516  for (i = 0; i < 6; i++) {
3517  s->dc_val[0][s->block_index[i]] = 0;
3518  dst_idx += i >> 2;
3519  val = ((cbp >> (5 - i)) & 1);
3520  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3521  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3522  if (s->mb_intra) {
3523  /* check if prediction blocks A and C are available */
3524  v->a_avail = v->c_avail = 0;
3525  if (i == 2 || i == 3 || !s->first_slice_line)
3526  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3527  if (i == 1 || i == 3 || s->mb_x)
3528  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3529 
3530  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3531  (i & 4) ? v->codingset2 : v->codingset);
3532  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3533  continue;
3534  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3535  if (v->rangeredfrm)
3536  for (j = 0; j < 64; j++)
3537  s->block[i][j] <<= 1;
3538  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3539  if (v->pq >= 9 && v->overlap) {
3540  if (v->c_avail)
3541  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3542  if (v->a_avail)
3543  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3544  }
3545  block_cbp |= 0xF << (i << 2);
3546  block_intra |= 1 << i;
3547  } else if (val) {
3548  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3549  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3550  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3551  block_cbp |= pat << (i << 2);
3552  if (!v->ttmbf && ttmb < 8)
3553  ttmb = -1;
3554  first_block = 0;
3555  }
3556  }
3557  } else { // skipped
3558  s->mb_intra = 0;
3559  for (i = 0; i < 6; i++) {
3560  v->mb_type[0][s->block_index[i]] = 0;
3561  s->dc_val[0][s->block_index[i]] = 0;
3562  }
3563  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3564  s->current_picture.f.qscale_table[mb_pos] = 0;
3565  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3566  vc1_mc_1mv(v, 0);
3567  }
3568  } else { // 4MV mode
3569  if (!skipped /* unskipped MB */) {
3570  int intra_count = 0, coded_inter = 0;
3571  int is_intra[6], is_coded[6];
3572  /* Get CBPCY */
3573  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3574  for (i = 0; i < 6; i++) {
3575  val = ((cbp >> (5 - i)) & 1);
3576  s->dc_val[0][s->block_index[i]] = 0;
3577  s->mb_intra = 0;
3578  if (i < 4) {
3579  dmv_x = dmv_y = 0;
3580  s->mb_intra = 0;
3581  mb_has_coeffs = 0;
3582  if (val) {
3583  GET_MVDATA(dmv_x, dmv_y);
3584  }
3585  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3586  if (!s->mb_intra)
3587  vc1_mc_4mv_luma(v, i, 0);
3588  intra_count += s->mb_intra;
3589  is_intra[i] = s->mb_intra;
3590  is_coded[i] = mb_has_coeffs;
3591  }
      /* Chroma blocks are intra when at least 3 of the 4 luma blocks are. */
3592  if (i & 4) {
3593  is_intra[i] = (intra_count >= 3);
3594  is_coded[i] = val;
3595  }
3596  if (i == 4)
3597  vc1_mc_4mv_chroma(v, 0);
3598  v->mb_type[0][s->block_index[i]] = is_intra[i];
3599  if (!coded_inter)
3600  coded_inter = !is_intra[i] & is_coded[i];
3601  }
3602  // if there are no coded blocks then don't do anything more
3603  dst_idx = 0;
3604  if (!intra_count && !coded_inter)
3605  goto end;
3606  GET_MQUANT();
3607  s->current_picture.f.qscale_table[mb_pos] = mquant;
3608  /* test if block is intra and has pred */
3609  {
3610  int intrapred = 0;
3611  for (i = 0; i < 6; i++)
3612  if (is_intra[i]) {
3613  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3614  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3615  intrapred = 1;
3616  break;
3617  }
3618  }
3619  if (intrapred)
3620  s->ac_pred = get_bits1(gb);
3621  else
3622  s->ac_pred = 0;
3623  }
3624  if (!v->ttmbf && coded_inter)
      /* NOTE(review): missing line 3625 — presumably
       * ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
       * VC1_TTMB_VLC_BITS, 2); confirm upstream. */
3626  for (i = 0; i < 6; i++) {
3627  dst_idx += i >> 2;
3628  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3629  s->mb_intra = is_intra[i];
3630  if (is_intra[i]) {
3631  /* check if prediction blocks A and C are available */
3632  v->a_avail = v->c_avail = 0;
3633  if (i == 2 || i == 3 || !s->first_slice_line)
3634  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3635  if (i == 1 || i == 3 || s->mb_x)
3636  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3637 
3638  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3639  (i & 4) ? v->codingset2 : v->codingset);
3640  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3641  continue;
3642  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3643  if (v->rangeredfrm)
3644  for (j = 0; j < 64; j++)
3645  s->block[i][j] <<= 1;
3646  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3647  (i & 4) ? s->uvlinesize : s->linesize);
3648  if (v->pq >= 9 && v->overlap) {
3649  if (v->c_avail)
3650  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3651  if (v->a_avail)
3652  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3653  }
3654  block_cbp |= 0xF << (i << 2);
3655  block_intra |= 1 << i;
3656  } else if (is_coded[i]) {
3657  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3658  first_block, s->dest[dst_idx] + off,
3659  (i & 4) ? s->uvlinesize : s->linesize,
3660  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3661  &block_tt);
3662  block_cbp |= pat << (i << 2);
3663  if (!v->ttmbf && ttmb < 8)
3664  ttmb = -1;
3665  first_block = 0;
3666  }
3667  }
3668  } else { // skipped MB
3669  s->mb_intra = 0;
3670  s->current_picture.f.qscale_table[mb_pos] = 0;
3671  for (i = 0; i < 6; i++) {
3672  v->mb_type[0][s->block_index[i]] = 0;
3673  s->dc_val[0][s->block_index[i]] = 0;
3674  }
3675  for (i = 0; i < 4; i++) {
3676  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3677  vc1_mc_4mv_luma(v, i, 0);
3678  }
3679  vc1_mc_4mv_chroma(v, 0);
3680  s->current_picture.f.qscale_table[mb_pos] = 0;
3681  }
3682  }
3683 end:
     /* Publish per-MB masks consumed later by the deferred loop filter. */
3684  v->cbp[s->mb_x] = block_cbp;
3685  v->ttblk[s->mb_x] = block_tt;
3686  v->is_intra[s->mb_x] = block_intra;
3687 
3688  return 0;
3689 }
3690 
3691 /* Decode one macroblock in an interlaced frame p picture */
3692 
/* NOTE(review): the signature line (original 3693) was lost in the
 * extraction; from the body this is vc1_decode_p_mb_intfr(VC1Context *v)
 * upstream — confirm against the original source.  Also missing are the
 * hyperlinked case labels MV_PMODE_INTFR_4MV_FIELD (3733) and
 * MV_PMODE_INTFR_2MV_FIELD (3740), the twomvbp/fourmvbp VLC reads
 * (3803, 3807), and the ttmb VLC read (3861).
 * Decodes one MB of an interlaced-frame P picture: MBMODE selects intra,
 * 1MV, 2MV-field, or 4MV; fieldtx controls the luma destination layout. */
3694 {
3695  MpegEncContext *s = &v->s;
3696  GetBitContext *gb = &s->gb;
3697  int i;
3698  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3699  int cbp = 0; /* cbp decoding stuff */
3700  int mqdiff, mquant; /* MB quantization */
3701  int ttmb = v->ttfrm; /* MB Transform type */
3702 
3703  int mb_has_coeffs = 1; /* last_flag */
3704  int dmv_x, dmv_y; /* Differential MV components */
3705  int val; /* temp value */
3706  int first_block = 1;
3707  int dst_idx, off;
3708  int skipped, fourmv = 0, twomv = 0;
3709  int block_cbp = 0, pat, block_tt = 0;
3710  int idx_mbmode = 0, mvbp;
3711  int stride_y, fieldtx;
3712 
3713  mquant = v->pq; /* Loosy initialization */
3714 
3715  if (v->skip_is_raw)
3716  skipped = get_bits1(gb);
3717  else
3718  skipped = v->s.mbskip_table[mb_pos];
3719  if (!skipped) {
3720  if (v->fourmvswitch)
3721  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3722  else
3723  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3724  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3725  /* store the motion vector type in a flag (useful later) */
3726  case MV_PMODE_INTFR_4MV:
3727  fourmv = 1;
3728  v->blk_mv_type[s->block_index[0]] = 0;
3729  v->blk_mv_type[s->block_index[1]] = 0;
3730  v->blk_mv_type[s->block_index[2]] = 0;
3731  v->blk_mv_type[s->block_index[3]] = 0;
3732  break;
      /* NOTE(review): missing case label (original line 3733) — presumably
       * case MV_PMODE_INTFR_4MV_FIELD: */
3734  fourmv = 1;
3735  v->blk_mv_type[s->block_index[0]] = 1;
3736  v->blk_mv_type[s->block_index[1]] = 1;
3737  v->blk_mv_type[s->block_index[2]] = 1;
3738  v->blk_mv_type[s->block_index[3]] = 1;
3739  break;
      /* NOTE(review): missing case label (original line 3740) — presumably
       * case MV_PMODE_INTFR_2MV_FIELD: */
3741  twomv = 1;
3742  v->blk_mv_type[s->block_index[0]] = 1;
3743  v->blk_mv_type[s->block_index[1]] = 1;
3744  v->blk_mv_type[s->block_index[2]] = 1;
3745  v->blk_mv_type[s->block_index[3]] = 1;
3746  break;
3747  case MV_PMODE_INTFR_1MV:
3748  v->blk_mv_type[s->block_index[0]] = 0;
3749  v->blk_mv_type[s->block_index[1]] = 0;
3750  v->blk_mv_type[s->block_index[2]] = 0;
3751  v->blk_mv_type[s->block_index[3]] = 0;
3752  break;
3753  }
3754  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3755  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3756  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3757  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3758  s->mb_intra = v->is_intra[s->mb_x] = 1;
3759  for (i = 0; i < 6; i++)
3760  v->mb_type[0][s->block_index[i]] = 1;
3761  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3762  mb_has_coeffs = get_bits1(gb);
3763  if (mb_has_coeffs)
3764  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3765  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3766  GET_MQUANT();
3767  s->current_picture.f.qscale_table[mb_pos] = mquant;
3768  /* Set DC scale - y and c use the same (not sure if necessary here) */
3769  s->y_dc_scale = s->y_dc_scale_table[mquant];
3770  s->c_dc_scale = s->c_dc_scale_table[mquant];
3771  dst_idx = 0;
3772  for (i = 0; i < 6; i++) {
3773  s->dc_val[0][s->block_index[i]] = 0;
3774  dst_idx += i >> 2;
3775  val = ((cbp >> (5 - i)) & 1);
3776  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3777  v->a_avail = v->c_avail = 0;
3778  if (i == 2 || i == 3 || !s->first_slice_line)
3779  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3780  if (i == 1 || i == 3 || s->mb_x)
3781  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3782 
3783  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3784  (i & 4) ? v->codingset2 : v->codingset);
3785  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3786  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
      /* fieldtx doubles the luma stride and interleaves the two fields. */
3787  if (i < 4) {
3788  stride_y = s->linesize << fieldtx;
3789  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3790  } else {
3791  stride_y = s->uvlinesize;
3792  off = 0;
3793  }
3794  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3795  //TODO: loop filter
3796  }
3797 
3798  } else { // inter MB
3799  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3800  if (mb_has_coeffs)
3801  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3802  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
      /* NOTE(review): missing line 3803 — presumably
       * v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table,
       * VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1); confirm upstream. */
3804  } else {
3805  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3806  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
      /* NOTE(review): missing line 3807 — presumably
       * v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table,
       * VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1); confirm upstream. */
3808  }
3809  }
3810  s->mb_intra = v->is_intra[s->mb_x] = 0;
3811  for (i = 0; i < 6; i++)
3812  v->mb_type[0][s->block_index[i]] = 0;
3813  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3814  /* for all motion vector read MVDATA and motion compensate each block */
3815  dst_idx = 0;
3816  if (fourmv) {
3817  mvbp = v->fourmvbp;
3818  for (i = 0; i < 6; i++) {
3819  if (i < 4) {
3820  dmv_x = dmv_y = 0;
3821  val = ((mvbp >> (3 - i)) & 1);
3822  if (val) {
3823  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3824  }
3825  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3826  vc1_mc_4mv_luma(v, i, 0);
3827  } else if (i == 4) {
3828  vc1_mc_4mv_chroma4(v);
3829  }
3830  }
3831  } else if (twomv) {
      /* Two field MVs: one for the top-field pair (blocks 0/1), one for the
       * bottom-field pair (blocks 2/3). */
3832  mvbp = v->twomvbp;
3833  dmv_x = dmv_y = 0;
3834  if (mvbp & 2) {
3835  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3836  }
3837  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3838  vc1_mc_4mv_luma(v, 0, 0);
3839  vc1_mc_4mv_luma(v, 1, 0);
3840  dmv_x = dmv_y = 0;
3841  if (mvbp & 1) {
3842  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3843  }
3844  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3845  vc1_mc_4mv_luma(v, 2, 0);
3846  vc1_mc_4mv_luma(v, 3, 0);
3847  vc1_mc_4mv_chroma4(v);
3848  } else {
3849  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3850  dmv_x = dmv_y = 0;
3851  if (mvbp) {
3852  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3853  }
3854  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3855  vc1_mc_1mv(v, 0);
3856  }
3857  if (cbp)
3858  GET_MQUANT(); // p. 227
3859  s->current_picture.f.qscale_table[mb_pos] = mquant;
3860  if (!v->ttmbf && cbp)
      /* NOTE(review): missing line 3861 — presumably
       * ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
       * VC1_TTMB_VLC_BITS, 2); confirm upstream. */
3862  for (i = 0; i < 6; i++) {
3863  s->dc_val[0][s->block_index[i]] = 0;
3864  dst_idx += i >> 2;
3865  val = ((cbp >> (5 - i)) & 1);
3866  if (!fieldtx)
3867  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3868  else
3869  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3870  if (val) {
3871  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3872  first_block, s->dest[dst_idx] + off,
3873  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3874  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3875  block_cbp |= pat << (i << 2);
3876  if (!v->ttmbf && ttmb < 8)
3877  ttmb = -1;
3878  first_block = 0;
3879  }
3880  }
3881  }
3882  } else { // skipped
3883  s->mb_intra = v->is_intra[s->mb_x] = 0;
3884  for (i = 0; i < 6; i++) {
3885  v->mb_type[0][s->block_index[i]] = 0;
3886  s->dc_val[0][s->block_index[i]] = 0;
3887  }
3888  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3889  s->current_picture.f.qscale_table[mb_pos] = 0;
3890  v->blk_mv_type[s->block_index[0]] = 0;
3891  v->blk_mv_type[s->block_index[1]] = 0;
3892  v->blk_mv_type[s->block_index[2]] = 0;
3893  v->blk_mv_type[s->block_index[3]] = 0;
3894  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3895  vc1_mc_1mv(v, 0);
3896  }
     /* At the row end, promote the current row's intra flags so the next
      * row can use them as "previous row" state. */
3897  if (s->mb_x == s->mb_width - 1)
3898  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3899  return 0;
3900 }
3901 
/* NOTE(review): this listing is a doxygen rendering of vc1dec.c.  Each line
 * still carries its original source line number, and hyperlinked tokens were
 * dropped by the extraction: the function signature (original line 3902) and
 * the statements at original lines 3973 and 3995 are missing.  Judging from
 * the body (interlaced MV data, field offsets via cur_field_type) this is
 * the field-interlaced P-frame macroblock decoder, vc1_decode_p_mb_intfi --
 * confirm against the upstream file before editing. */
3903 {
3904  MpegEncContext *s = &v->s;
3905  GetBitContext *gb = &s->gb;
3906  int i;
3907  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3908  int cbp = 0; /* cbp decoding stuff */
3909  int mqdiff, mquant; /* MB quantization */
3910  int ttmb = v->ttfrm; /* MB Transform type */
3911 
3912  int mb_has_coeffs = 1; /* last_flag */
3913  int dmv_x, dmv_y; /* Differential MV components */
3914  int val; /* temp values */
3915  int first_block = 1;
3916  int dst_idx, off;
3917  int pred_flag;
3918  int block_cbp = 0, pat, block_tt = 0;
3919  int idx_mbmode = 0;
3920 
3921  mquant = v->pq; /* Loosy initialization */
3922 
/* MBMODE VLC selects the MB coding mode; the branches below treat
 * values <= 1 as intra, <= 5 as 1-MV inter, otherwise 4-MV inter. */
3923  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3924  if (idx_mbmode <= 1) { // intra MB
3925  s->mb_intra = v->is_intra[s->mb_x] = 1;
3926  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
3927  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
3928  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
3929  GET_MQUANT();
3930  s->current_picture.f.qscale_table[mb_pos] = mquant;
3931  /* Set DC scale - y and c use the same (not sure if necessary here) */
3932  s->y_dc_scale = s->y_dc_scale_table[mquant];
3933  s->c_dc_scale = s->c_dc_scale_table[mquant];
3934  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3935  mb_has_coeffs = idx_mbmode & 1;
3936  if (mb_has_coeffs)
3937  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
3938  dst_idx = 0;
/* Decode the six 8x8 blocks (4 luma, 2 chroma) of the intra MB. */
3939  for (i = 0; i < 6; i++) {
3940  s->dc_val[0][s->block_index[i]] = 0;
3941  v->mb_type[0][s->block_index[i]] = 1;
3942  dst_idx += i >> 2;
3943  val = ((cbp >> (5 - i)) & 1);
/* a_avail/c_avail: availability of the above and left prediction
 * blocks for DC/AC prediction, derived from slice/MB position. */
3944  v->a_avail = v->c_avail = 0;
3945  if (i == 2 || i == 3 || !s->first_slice_line)
3946  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3947  if (i == 1 || i == 3 || s->mb_x)
3948  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3949 
3950  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3951  (i & 4) ? v->codingset2 : v->codingset);
3952  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3953  continue;
3954  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Destination offset: luma blocks are laid out 2x2 inside the MB;
 * in the second field, shift down by one source line. */
3955  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3956  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
3957  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
3958  // TODO: loop filter
3959  }
3960  } else {
3961  s->mb_intra = v->is_intra[s->mb_x] = 0;
3962  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
3963  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
3964  if (idx_mbmode <= 5) { // 1-MV
3965  dmv_x = dmv_y = pred_flag = 0;
3966  if (idx_mbmode & 1) {
3967  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3968  }
3969  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3970  vc1_mc_1mv(v, 0);
3971  mb_has_coeffs = !(idx_mbmode & 2);
3972  } else { // 4-MV
/* NOTE(review): the statement at original line 3973 was lost in
 * extraction.  v->fourmvbp is consumed just below, so it presumably
 * read the 4-MV block pattern VLC here -- restore from upstream. */
3974  for (i = 0; i < 6; i++) {
3975  if (i < 4) {
3976  dmv_x = dmv_y = pred_flag = 0;
3977  val = ((v->fourmvbp >> (3 - i)) & 1);
3978  if (val) {
3979  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3980  }
3981  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3982  vc1_mc_4mv_luma(v, i, 0);
3983  } else if (i == 4)
3984  vc1_mc_4mv_chroma(v, 0);
3985  }
3986  mb_has_coeffs = idx_mbmode & 1;
3987  }
3988  if (mb_has_coeffs)
3989  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3990  if (cbp) {
3991  GET_MQUANT();
3992  }
3993  s->current_picture.f.qscale_table[mb_pos] = mquant;
3994  if (!v->ttmbf && cbp) {
/* NOTE(review): statement at original line 3995 lost in extraction;
 * ttmb is used below, so the per-MB transform-type VLC was
 * presumably read here -- restore from upstream. */
3996  }
3997  dst_idx = 0;
3998  for (i = 0; i < 6; i++) {
3999  s->dc_val[0][s->block_index[i]] = 0;
4000  dst_idx += i >> 2;
4001  val = ((cbp >> (5 - i)) & 1);
4002  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4003  if (v->cur_field_type)
4004  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4005  if (val) {
4006  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4007  first_block, s->dest[dst_idx] + off,
4008  (i & 4) ? s->uvlinesize : s->linesize,
4009  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4010  &block_tt);
4011  block_cbp |= pat << (i << 2);
4012  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4013  first_block = 0;
4014  }
4015  }
4016  }
/* At the end of a row, roll the per-row intra flags into the base row
 * so the next MB row can use them for prediction availability. */
4017  if (s->mb_x == s->mb_width - 1)
4018  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4019  return 0;
4020 }
4021 
/* NOTE(review): doxygen-rendered body with the signature (original lines
 * 4022-4024) and the hyperlinked statements at original lines 4098 and 4135
 * stripped.  The use of bfraction, direct/skip bitplanes and vc1_b_mc
 * indicates this is the progressive B-frame macroblock decoder,
 * vc1_decode_b_mb -- confirm against upstream before editing. */
4025 {
4026  MpegEncContext *s = &v->s;
4027  GetBitContext *gb = &s->gb;
4028  int i, j;
4029  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4030  int cbp = 0; /* cbp decoding stuff */
4031  int mqdiff, mquant; /* MB quantization */
4032  int ttmb = v->ttfrm; /* MB Transform type */
4033  int mb_has_coeffs = 0; /* last_flag */
4034  int index, index1; /* LUT indexes */
4035  int val, sign; /* temp values */
4036  int first_block = 1;
4037  int dst_idx, off;
4038  int skipped, direct;
4039  int dmv_x[2], dmv_y[2];
4040  int bmvtype = BMV_TYPE_BACKWARD;
4041 
4042  mquant = v->pq; /* lossy initialization */
4043  s->mb_intra = 0;
4044 
/* direct/skip flags come either raw from the bitstream or from the
 * previously decoded bitplanes. */
4045  if (v->dmb_is_raw)
4046  direct = get_bits1(gb);
4047  else
4048  direct = v->direct_mb_plane[mb_pos];
4049  if (v->skip_is_raw)
4050  skipped = get_bits1(gb);
4051  else
4052  skipped = v->s.mbskip_table[mb_pos];
4053 
4054  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4055  for (i = 0; i < 6; i++) {
4056  v->mb_type[0][s->block_index[i]] = 0;
4057  s->dc_val[0][s->block_index[i]] = 0;
4058  }
4059  s->current_picture.f.qscale_table[mb_pos] = 0;
4060 
4061  if (!direct) {
4062  if (!skipped) {
4063  GET_MVDATA(dmv_x[0], dmv_y[0]);
4064  dmv_x[1] = dmv_x[0];
4065  dmv_y[1] = dmv_y[0];
4066  }
4067  if (skipped || !s->mb_intra) {
/* decode012 picks the B MV type; cases 0/1 resolve to forward or
 * backward depending on which reference is temporally closer
 * (bfraction against B_FRACTION_DEN/2). */
4068  bmvtype = decode012(gb);
4069  switch (bmvtype) {
4070  case 0:
4071  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4072  break;
4073  case 1:
4074  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4075  break;
4076  case 2:
4077  bmvtype = BMV_TYPE_INTERPOLATED;
4078  dmv_x[0] = dmv_y[0] = 0;
4079  }
4080  }
4081  }
4082  for (i = 0; i < 6; i++)
4083  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4084 
4085  if (skipped) {
4086  if (direct)
4087  bmvtype = BMV_TYPE_INTERPOLATED;
4088  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4089  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* Skipped MB: motion compensation only, no residual to decode. */
4090  return;
4091  }
4092  if (direct) {
4093  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4094  GET_MQUANT();
4095  s->mb_intra = 0;
4096  s->current_picture.f.qscale_table[mb_pos] = mquant;
4097  if (!v->ttmbf)
/* NOTE(review): statement at original line 4098 lost in extraction;
 * presumably the TTMB VLC read assigning ttmb -- restore upstream. */
4099  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4100  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4101  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4102  } else {
4103  if (!mb_has_coeffs && !s->mb_intra) {
4104  /* no coded blocks - effectively skipped */
4105  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4106  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4107  return;
4108  }
4109  if (s->mb_intra && !mb_has_coeffs) {
4110  GET_MQUANT();
4111  s->current_picture.f.qscale_table[mb_pos] = mquant;
4112  s->ac_pred = get_bits1(gb);
4113  cbp = 0;
4114  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4115  } else {
4116  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4117  GET_MVDATA(dmv_x[0], dmv_y[0]);
4118  if (!mb_has_coeffs) {
4119  /* interpolated skipped block */
4120  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4121  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4122  return;
4123  }
4124  }
4125  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4126  if (!s->mb_intra) {
4127  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4128  }
4129  if (s->mb_intra)
4130  s->ac_pred = get_bits1(gb);
4131  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4132  GET_MQUANT();
4133  s->current_picture.f.qscale_table[mb_pos] = mquant;
4134  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
/* NOTE(review): statement at original line 4135 lost in extraction;
 * presumably the TTMB VLC read assigning ttmb -- restore upstream. */
4136  }
4137  }
4138  dst_idx = 0;
/* Residual decoding for the six blocks: intra blocks go through the
 * intra path with DC/AC prediction, inter blocks through
 * vc1_decode_p_block. */
4139  for (i = 0; i < 6; i++) {
4140  s->dc_val[0][s->block_index[i]] = 0;
4141  dst_idx += i >> 2;
4142  val = ((cbp >> (5 - i)) & 1);
4143  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4144  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4145  if (s->mb_intra) {
4146  /* check if prediction blocks A and C are available */
4147  v->a_avail = v->c_avail = 0;
4148  if (i == 2 || i == 3 || !s->first_slice_line)
4149  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4150  if (i == 1 || i == 3 || s->mb_x)
4151  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4152 
4153  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4154  (i & 4) ? v->codingset2 : v->codingset);
4155  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4156  continue;
4157  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Range-reduced frames double the reconstructed sample range. */
4158  if (v->rangeredfrm)
4159  for (j = 0; j < 64; j++)
4160  s->block[i][j] <<= 1;
4161  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4162  } else if (val) {
4163  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4164  first_block, s->dest[dst_idx] + off,
4165  (i & 4) ? s->uvlinesize : s->linesize,
4166  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4167  if (!v->ttmbf && ttmb < 8)
4168  ttmb = -1;
4169  first_block = 0;
4170  }
4171  }
4172 }
4173 
/* NOTE(review): doxygen-rendered body with the signature (original line
 * 4176) and the hyperlinked statements at original lines 4282 and 4307
 * stripped.  The interlaced MV reads plus B-type handling indicate this is
 * the field-interlaced B-frame macroblock decoder, vc1_decode_b_mb_intfi --
 * confirm against upstream before editing. */
4177 {
4178  MpegEncContext *s = &v->s;
4179  GetBitContext *gb = &s->gb;
4180  int i, j;
4181  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4182  int cbp = 0; /* cbp decoding stuff */
4183  int mqdiff, mquant; /* MB quantization */
4184  int ttmb = v->ttfrm; /* MB Transform type */
4185  int mb_has_coeffs = 0; /* last_flag */
4186  int val; /* temp value */
4187  int first_block = 1;
4188  int dst_idx, off;
4189  int fwd;
4190  int dmv_x[2], dmv_y[2], pred_flag[2];
4191  int bmvtype = BMV_TYPE_BACKWARD;
4192  int idx_mbmode, interpmvp;
4193 
4194  mquant = v->pq; /* Loosy initialization */
4195  s->mb_intra = 0;
4196 
4197  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4198  if (idx_mbmode <= 1) { // intra MB
4199  s->mb_intra = v->is_intra[s->mb_x] = 1;
4200  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4201  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4202  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4203  GET_MQUANT();
4204  s->current_picture.f.qscale_table[mb_pos] = mquant;
4205  /* Set DC scale - y and c use the same (not sure if necessary here) */
4206  s->y_dc_scale = s->y_dc_scale_table[mquant];
4207  s->c_dc_scale = s->c_dc_scale_table[mquant];
4208  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4209  mb_has_coeffs = idx_mbmode & 1;
4210  if (mb_has_coeffs)
4211  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4212  dst_idx = 0;
/* Intra path: decode all six blocks with DC/AC prediction, exactly
 * as in the interlaced-field P intra path. */
4213  for (i = 0; i < 6; i++) {
4214  s->dc_val[0][s->block_index[i]] = 0;
4215  dst_idx += i >> 2;
4216  val = ((cbp >> (5 - i)) & 1);
4217  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4218  v->a_avail = v->c_avail = 0;
4219  if (i == 2 || i == 3 || !s->first_slice_line)
4220  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4221  if (i == 1 || i == 3 || s->mb_x)
4222  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4223 
4224  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4225  (i & 4) ? v->codingset2 : v->codingset);
4226  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4227  continue;
4228  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4229  if (v->rangeredfrm)
4230  for (j = 0; j < 64; j++)
4231  s->block[i][j] <<= 1;
4232  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4233  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4234  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4235  // TODO: yet to perform loop filter
4236  }
4237  } else {
4238  s->mb_intra = v->is_intra[s->mb_x] = 0;
4239  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4240  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* forward flag: raw bit or value from the FORWARDMB bitplane. */
4241  if (v->fmb_is_raw)
4242  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4243  else
4244  fwd = v->forward_mb_plane[mb_pos];
4245  if (idx_mbmode <= 5) { // 1-MV
4246  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4247  pred_flag[0] = pred_flag[1] = 0;
4248  if (fwd)
4249  bmvtype = BMV_TYPE_FORWARD;
4250  else {
4251  bmvtype = decode012(gb);
4252  switch (bmvtype) {
4253  case 0:
4254  bmvtype = BMV_TYPE_BACKWARD;
4255  break;
4256  case 1:
4257  bmvtype = BMV_TYPE_DIRECT;
4258  break;
4259  case 2:
4260  bmvtype = BMV_TYPE_INTERPOLATED;
4261  interpmvp = get_bits1(gb);
4262  }
4263  }
4264  v->bmvtype = bmvtype;
/* Index 0 holds the forward MV, index 1 the backward MV. */
4265  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4266  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4267  }
/* NOTE(review): interpmvp is only assigned in the case-2 branch
 * above; on the BMV_TYPE_FORWARD path it is read uninitialized here
 * in the original source as well -- verify against upstream. */
4268  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4269  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4270  }
4271  if (bmvtype == BMV_TYPE_DIRECT) {
4272  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4273  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4274  }
4275  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4276  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4277  mb_has_coeffs = !(idx_mbmode & 2);
4278  } else { // 4-MV
4279  if (fwd)
4280  bmvtype = BMV_TYPE_FORWARD;
4281  v->bmvtype = bmvtype;
/* NOTE(review): the statement at original line 4282 was lost in
 * extraction.  v->fourmvbp is consumed below, so it presumably read
 * the 4-MV block pattern VLC -- restore from upstream. */
4283  for (i = 0; i < 6; i++) {
4284  if (i < 4) {
4285  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4286  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4287  val = ((v->fourmvbp >> (3 - i)) & 1);
4288  if (val) {
4289  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4290  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4291  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4292  }
4293  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4294  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4295  } else if (i == 4)
4296  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4297  }
4298  mb_has_coeffs = idx_mbmode & 1;
4299  }
4300  if (mb_has_coeffs)
4301  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4302  if (cbp) {
4303  GET_MQUANT();
4304  }
4305  s->current_picture.f.qscale_table[mb_pos] = mquant;
4306  if (!v->ttmbf && cbp) {
/* NOTE(review): statement at original line 4307 lost in extraction;
 * presumably the TTMB VLC read assigning ttmb -- restore upstream. */
4308  }
4309  dst_idx = 0;
/* Inter residual decoding for coded blocks only. */
4310  for (i = 0; i < 6; i++) {
4311  s->dc_val[0][s->block_index[i]] = 0;
4312  dst_idx += i >> 2;
4313  val = ((cbp >> (5 - i)) & 1);
4314  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4315  if (v->cur_field_type)
4316  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4317  if (val) {
4318  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4319  first_block, s->dest[dst_idx] + off,
4320  (i & 4) ? s->uvlinesize : s->linesize,
4321  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4322  if (!v->ttmbf && ttmb < 8)
4323  ttmb = -1;
4324  first_block = 0;
4325  }
4326  }
4327  }
4328 }
4329 
/* NOTE(review): doxygen-rendered body with the signature (original line
 * 4332) stripped, along with several hyperlinked statements: the codingset
 * assignments inside both switches (original lines 4343/4346/4349 and
 * 4355/4358/4361), the block-index init/update calls (4375, 4378) and the
 * CBPCY read at 4393.  The use of v->pq directly and the simple overlap
 * filtering indicate this is the Simple/Main-profile I-frame decoder,
 * vc1_decode_i_blocks -- confirm against upstream before editing. */
4333 {
4334  int k, j;
4335  MpegEncContext *s = &v->s;
4336  int cbp, val;
4337  uint8_t *coded_val;
4338  int mb_pos;
4339 
4340  /* select codingmode used for VLC tables selection */
/* NOTE(review): each case body below lost its codingset assignment
 * to extraction; only the breaks remain. */
4341  switch (v->y_ac_table_index) {
4342  case 0:
4344  break;
4345  case 1:
4347  break;
4348  case 2:
4350  break;
4351  }
4352 
4353  switch (v->c_ac_table_index) {
4354  case 0:
4356  break;
4357  case 1:
4359  break;
4360  case 2:
4362  break;
4363  }
4364 
4365  /* Set DC scale - y and c use the same */
4366  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4367  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4368 
4369  //do frame decode
4370  s->mb_x = s->mb_y = 0;
4371  s->mb_intra = 1;
4372  s->first_slice_line = 1;
4373  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4374  s->mb_x = 0;
/* NOTE(review): block-index initialization call (orig. line 4375)
 * lost in extraction. */
4376  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4377  uint8_t *dst[6];
/* NOTE(review): block-index update call (orig. line 4378) lost in
 * extraction.  dst[] maps the four luma 8x8 blocks and two chroma
 * blocks to their destinations inside the current MB. */
4379  dst[0] = s->dest[0];
4380  dst[1] = dst[0] + 8;
4381  dst[2] = s->dest[0] + s->linesize * 8;
4382  dst[3] = dst[2] + 8;
4383  dst[4] = s->dest[1];
4384  dst[5] = s->dest[2];
4385  s->dsp.clear_blocks(s->block[0]);
4386  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4387  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4388  s->current_picture.f.qscale_table[mb_pos] = v->pq;
4389  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4390  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4391 
4392  // do actual MB decoding and displaying
/* NOTE(review): the CBPCY VLC read assigning cbp (orig. line 4393)
 * was lost in extraction; cbp is consumed below -- restore from
 * upstream.  As transcribed, cbp would be used uninitialized. */
4394  v->s.ac_pred = get_bits1(&v->s.gb);
4395 
4396  for (k = 0; k < 6; k++) {
4397  val = ((cbp >> (5 - k)) & 1);
4398 
/* Luma coded flags are differentially coded against the predicted
 * coded-block flag; chroma flags are used as-is. */
4399  if (k < 4) {
4400  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4401  val = val ^ pred;
4402  *coded_val = val;
4403  }
4404  cbp |= val << (5 - k);
4405 
4406  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4407 
4408  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4409  continue;
4410  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* With overlap filtering the samples stay signed (filter applied
 * on signed values); otherwise rebias by -64 before clamping. */
4411  if (v->pq >= 9 && v->overlap) {
4412  if (v->rangeredfrm)
4413  for (j = 0; j < 64; j++)
4414  s->block[k][j] <<= 1;
4415  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4416  } else {
4417  if (v->rangeredfrm)
4418  for (j = 0; j < 64; j++)
4419  s->block[k][j] = (s->block[k][j] - 64) << 1;
4420  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4421  }
4422  }
4423 
/* Overlap smoothing across 8x8 block edges, horizontal then
 * vertical, skipping picture borders and (for chroma) gray mode. */
4424  if (v->pq >= 9 && v->overlap) {
4425  if (s->mb_x) {
4426  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4427  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4428  if (!(s->flags & CODEC_FLAG_GRAY)) {
4429  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4430  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4431  }
4432  }
4433  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4434  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4435  if (!s->first_slice_line) {
4436  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4437  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4438  if (!(s->flags & CODEC_FLAG_GRAY)) {
4439  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4440  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4441  }
4442  }
4443  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4444  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4445  }
4446  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4447 
4448  if (get_bits_count(&s->gb) > v->bits) {
4449  ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4450  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4451  get_bits_count(&s->gb), v->bits);
4452  return;
4453  }
4454  }
4455  if (!v->s.loop_filter)
4456  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4457  else if (s->mb_y)
4458  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4459 
4460  s->first_slice_line = 0;
4461  }
4462  if (v->s.loop_filter)
4463  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4464 
4465  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4466  * profile, these only differ are when decoding MSS2 rectangles. */
4467  ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4468 }
4469 
/* NOTE(review): doxygen-rendered body with the signature (original line
 * 4472) stripped, along with many hyperlinked statements: codingset
 * assignments in both switches, the block-index init/update calls (4515,
 * 4521, 4524), the CBPCY read at 4534, the per-row reconstruction /
 * overlap / loop-filter calls around 4568-4573, and the bottom-row flush
 * at 4592-4597.  The per-MB MQUANT, ACPRED plane and fieldtx handling
 * indicate this is the Advanced-profile I-frame decoder,
 * vc1_decode_i_blocks_adv -- confirm against upstream before editing. */
4473 {
4474  int k;
4475  MpegEncContext *s = &v->s;
4476  int cbp, val;
4477  uint8_t *coded_val;
4478  int mb_pos;
4479  int mquant = v->pq;
4480  int mqdiff;
4481  GetBitContext *gb = &s->gb;
4482 
4483  /* select codingmode used for VLC tables selection */
/* NOTE(review): each case body below lost its codingset assignment
 * to extraction; only the breaks remain. */
4484  switch (v->y_ac_table_index) {
4485  case 0:
4487  break;
4488  case 1:
4490  break;
4491  case 2:
4493  break;
4494  }
4495 
4496  switch (v->c_ac_table_index) {
4497  case 0:
4499  break;
4500  case 1:
4502  break;
4503  case 2:
4505  break;
4506  }
4507 
4508  // do frame decode
4509  s->mb_x = s->mb_y = 0;
4510  s->mb_intra = 1;
4511  s->first_slice_line = 1;
4512  s->mb_y = s->start_mb_y;
/* When starting mid-picture (slice), clear the coded-block predictor
 * row above the slice so prediction does not leak across slices. */
4513  if (s->start_mb_y) {
4514  s->mb_x = 0;
4516  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4517  (1 + s->b8_stride) * sizeof(*s->coded_block));
4518  }
4519  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4520  s->mb_x = 0;
4522  for (;s->mb_x < s->mb_width; s->mb_x++) {
4523  DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4525  s->dsp.clear_blocks(block[0]);
4526  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4527  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4528  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4529  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4530 
4531  // do actual MB decoding and displaying
/* FIELDTX / ACPRED / CONDOVER flags: raw bits or bitplane values. */
4532  if (v->fieldtx_is_raw)
4533  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
/* NOTE(review): the CBPCY VLC read (orig. line 4534) was lost in
 * extraction; cbp is consumed below -- restore from upstream. */
4535  if ( v->acpred_is_raw)
4536  v->s.ac_pred = get_bits1(&v->s.gb);
4537  else
4538  v->s.ac_pred = v->acpred_plane[mb_pos];
4539 
4540  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4541  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4542 
4543  GET_MQUANT();
4544 
4545  s->current_picture.f.qscale_table[mb_pos] = mquant;
4546  /* Set DC scale - y and c use the same */
4547  s->y_dc_scale = s->y_dc_scale_table[mquant];
4548  s->c_dc_scale = s->c_dc_scale_table[mquant];
4549 
4550  for (k = 0; k < 6; k++) {
4551  val = ((cbp >> (5 - k)) & 1);
4552 
4553  if (k < 4) {
4554  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4555  val = val ^ pred;
4556  *coded_val = val;
4557  }
4558  cbp |= val << (5 - k);
4559 
4560  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4561  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4562 
4563  vc1_decode_i_block_adv(v, block[k], k, val,
4564  (k < 4) ? v->codingset : v->codingset2, mquant);
4565 
4566  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4567  continue;
/* NOTE(review): the inverse-transform call for this block (orig.
 * line 4568) was lost in extraction. */
4569  }
4570 
/* NOTE(review): reconstruction, overlap smoothing and in-loop
 * filtering calls (orig. lines 4571-4574) were lost in extraction. */
4574 
4575  if (get_bits_count(&s->gb) > v->bits) {
4576  // TODO: may need modification to handle slice coding
4577  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4578  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4579  get_bits_count(&s->gb), v->bits);
4580  return;
4581  }
4582  }
4583  if (!v->s.loop_filter)
4584  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4585  else if (s->mb_y)
4586  ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4587  s->first_slice_line = 0;
4588  }
4589 
4590  /* raw bottom MB row */
4591  s->mb_x = 0;
/* NOTE(review): the loop body that flushes the last buffered MB row
 * (orig. lines 4592-4597) was largely lost in extraction. */
4593  for (;s->mb_x < s->mb_width; s->mb_x++) {
4596  if (v->s.loop_filter)
4598  }
4599  if (v->s.loop_filter)
4600  ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4601  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4602  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4603 }
4604 
/* NOTE(review): doxygen-rendered body of the P-frame slice loop
 * (vc1_decode_p_blocks; signature at original line 4605 stripped).  The
 * codingset assignments inside the switches and the per-MB decode /
 * loop-filter calls at original lines 4645, 4647, 4650, 4668 and 4670-4671
 * were lost in extraction.  Both switches dispatch on c_ac_table_index --
 * the upstream source does the same, so this is not a transcription
 * error; verify against upstream regardless. */
4606 {
4607  MpegEncContext *s = &v->s;
4608  int apply_loop_filter;
4609 
4610  /* select codingmode used for VLC tables selection */
4611  switch (v->c_ac_table_index) {
4612  case 0:
4614  break;
4615  case 1:
4617  break;
4618  case 2:
4620  break;
4621  }
4622 
4623  switch (v->c_ac_table_index) {
4624  case 0:
4626  break;
4627  case 1:
4629  break;
4630  case 2:
4632  break;
4633  }
4634 
4635  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4636  s->first_slice_line = 1;
4637  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4638  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4639  s->mb_x = 0;
4641  for (; s->mb_x < s->mb_width; s->mb_x++) {
4643 
/* Dispatch on the frame coding mode: field-interlaced,
 * frame-interlaced or progressive MB decoding.  The hyperlinked
 * calls for the first two cases were lost in extraction. */
4644  if (v->fcm == ILACE_FIELD)
4646  else if (v->fcm == ILACE_FRAME)
4648  else vc1_decode_p_mb(v);
4649  if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
/* NOTE(review): the loop-filter call (orig. line 4650) was lost in
 * extraction. */
4651  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4652  // TODO: may need modification to handle slice coding
4653  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4654  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4655  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4656  return;
4657  }
4658  }
/* Roll the per-row prediction state (cbp, transform types, intra
 * flags, chroma MVs) into the base arrays for the next row. */
4659  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4660  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4661  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4662  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4663  if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4664  s->first_slice_line = 0;
4665  }
4666  if (apply_loop_filter) {
4667  s->mb_x = 0;
/* NOTE(review): loop-filter flush for the final row (orig. lines
 * 4668-4671) partly lost in extraction. */
4669  for (; s->mb_x < s->mb_width; s->mb_x++) {
4672  }
4673  }
4674  if (s->end_mb_y >= s->start_mb_y)
4675  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4676  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4677  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4678 }
4679 
/* NOTE(review): doxygen-rendered body of the B-frame slice loop
 * (vc1_decode_b_blocks; signature at original line 4680 stripped).  The
 * codingset assignments inside the switches and the field-interlaced MB
 * decode call at original line 4717 were lost in extraction. */
4681 {
4682  MpegEncContext *s = &v->s;
4683 
4684  /* select codingmode used for VLC tables selection */
4685  switch (v->c_ac_table_index) {
4686  case 0:
4688  break;
4689  case 1:
4691  break;
4692  case 2:
4694  break;
4695  }
4696 
4697  switch (v->c_ac_table_index) {
4698  case 0:
4700  break;
4701  case 1:
4703  break;
4704  case 2:
4706  break;
4707  }
4708 
4709  s->first_slice_line = 1;
4710  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4711  s->mb_x = 0;
4713  for (; s->mb_x < s->mb_width; s->mb_x++) {
4715 
/* Field-interlaced B MBs take the (stripped) intfi path; everything
 * else uses the progressive B MB decoder. */
4716  if (v->fcm == ILACE_FIELD)
4718  else
4719  vc1_decode_b_mb(v);
4720  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4721  // TODO: may need modification to handle slice coding
4722  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4723  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4724  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4725  return;
4726  }
4727  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4728  }
4729  if (!v->s.loop_filter)
4730  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4731  else if (s->mb_y)
4732  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4733  s->first_slice_line = 0;
4734  }
4735  if (v->s.loop_filter)
4736  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4737  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4738  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4739 }
4740 
/* NOTE(review): doxygen-rendered body of the skipped-P-frame handler
 * (vc1_decode_skip_blocks; signature at original line 4741 stripped, as
 * were the block-index calls at 4752-4753 and the final statement at
 * 4760).  A skipped picture is reproduced by copying the previous frame
 * row by row into the current destination. */
4742 {
4743  MpegEncContext *s = &v->s;
4744 
/* Nothing to copy from if there is no reference frame yet. */
4745  if (!v->s.last_picture.f.data[0])
4746  return;
4747 
4748  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4749  s->first_slice_line = 1;
4750  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4751  s->mb_x = 0;
/* Copy one 16-pixel-high luma band and the matching 8-high chroma
 * bands from the last picture. */
4754  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4755  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4756  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4757  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4758  s->first_slice_line = 0;
4759  }
4761 }
4762 
/* NOTE(review): doxygen-rendered body of the top-level per-picture
 * dispatcher (vc1_decode_blocks; signature at original line 4763
 * stripped).  Every hyperlinked call target inside the switch (the
 * profile-specific I/P/B decoders at original lines 4777, 4779, 4783,
 * 4785, 4790, 4792, 4794) was lost in extraction; only the control
 * structure remains -- restore the calls from upstream. */
4764 {
4765 
4766  v->s.esc3_level_length = 0;
/* X8-coded intra frames are handed entirely to the IntraX8 decoder. */
4767  if (v->x8_type) {
4768  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
4769  } else {
/* Reset the rotating block-row cache used by the advanced-profile
 * reconstruction delay. */
4770  v->cur_blk_idx = 0;
4771  v->left_blk_idx = -1;
4772  v->topleft_blk_idx = 1;
4773  v->top_blk_idx = 2;
4774  switch (v->s.pict_type) {
4775  case AV_PICTURE_TYPE_I:
4776  if (v->profile == PROFILE_ADVANCED)
4778  else
4780  break;
4781  case AV_PICTURE_TYPE_P:
4782  if (v->p_frame_skipped)
4784  else
4786  break;
4787  case AV_PICTURE_TYPE_B:
4788  if (v->bi_type) {
4789  if (v->profile == PROFILE_ADVANCED)
4791  else
4793  } else
4795  break;
4796  }
4797  }
4798 }
4799 
4800 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4801 
/* Parsed sprite (WMVP/WVP2) header data.
 * NOTE(review): the member declarations at original lines 4803-4813 were
 * lost in extraction; only the transform coefficients and effect fields
 * survived the doxygen rendering -- restore the full struct from
 * upstream. */
4802 typedef struct {
/* coefs[sprite][0..6]: per-sprite affine transform in 16.16 fixed
 * point, filled by vc1_sprite_parse_transform(). */
4814  int coefs[2][7];
4815 
4816  int effect_type, effect_flag;
4817  int effect_pcount1, effect_pcount2;
/* Effect parameters, also 16.16 fixed point. */
4818  int effect_params1[15], effect_params2[10];
4819 } SpriteData;
4820 
4821 static inline int get_fp_val(GetBitContext* gb)
4822 {
4823  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
4824 }
4825 
4826 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4827 {
4828  c[1] = c[3] = 0;
4829 
4830  switch (get_bits(gb, 2)) {
4831  case 0:
4832  c[0] = 1 << 16;
4833  c[2] = get_fp_val(gb);
4834  c[4] = 1 << 16;
4835  break;
4836  case 1:
4837  c[0] = c[4] = get_fp_val(gb);
4838  c[2] = get_fp_val(gb);
4839  break;
4840  case 2:
4841  c[0] = get_fp_val(gb);
4842  c[2] = get_fp_val(gb);
4843  c[4] = get_fp_val(gb);
4844  break;
4845  case 3:
4846  c[0] = get_fp_val(gb);
4847  c[1] = get_fp_val(gb);
4848  c[2] = get_fp_val(gb);
4849  c[3] = get_fp_val(gb);
4850  c[4] = get_fp_val(gb);
4851  break;
4852  }
4853  c[5] = get_fp_val(gb);
4854  if (get_bits1(gb))
4855  c[6] = get_fp_val(gb);
4856  else
4857  c[6] = 1 << 16;
4858 }
4859 
4860 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4861 {
4862  AVCodecContext *avctx = v->s.avctx;
4863  int sprite, i;
4864 
4865  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4866  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4867  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4868  av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4869  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4870  for (i = 0; i < 7; i++)
4871  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4872  sd->coefs[sprite][i] / (1<<16),
4873  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4874  av_log(avctx, AV_LOG_DEBUG, "\n");
4875  }
4876 
4877  skip_bits(gb, 2);
4878  if (sd->effect_type = get_bits_long(gb, 30)) {
4879  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4880  case 7:
4881  vc1_sprite_parse_transform(gb, sd->effect_params1);
4882  break;
4883  case 14:
4884  vc1_sprite_parse_transform(gb, sd->effect_params1);
4885  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4886  break;
4887  default:
4888  for (i = 0; i < sd->effect_pcount1; i++)
4889  sd->effect_params1[i] = get_fp_val(gb);
4890  }
4891  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4892  // effect 13 is simple alpha blending and matches the opacity above
4893  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4894  for (i = 0; i < sd->effect_pcount1; i++)
4895  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4896  sd->effect_params1[i] / (1 << 16),
4897  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4898  av_log(avctx, AV_LOG_DEBUG, "\n");
4899  }
4900 
4901  sd->effect_pcount2 = get_bits(gb, 16);
4902  if (sd->effect_pcount2 > 10) {
4903  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
4904  return;
4905  } else if (sd->effect_pcount2) {
4906  i = -1;
4907  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
4908  while (++i < sd->effect_pcount2) {
4909  sd->effect_params2[i] = get_fp_val(gb);
4910  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4911  sd->effect_params2[i] / (1 << 16),
4912  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
4913  }
4914  av_log(avctx, AV_LOG_DEBUG, "\n");
4915  }
4916  }
4917  if (sd->effect_flag = get_bits1(gb))
4918  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
4919 
4920  if (get_bits_count(gb) >= gb->size_in_bits +
4921  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
4922  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
4923  if (get_bits_count(gb) < gb->size_in_bits - 8)
4924  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
4925 }
4926 
/* Render the sprite picture: for every output row, horizontally resample
 * the needed source row(s) of each sprite (with a two-entry row cache),
 * then vertically interpolate and, for two sprites, alpha-blend them into
 * the output frame.  Left as-is: the cache / sub-position logic depends on
 * exact statement order. */
4927 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
4928 {
4929  int i, plane, row, sprite;
/* sr_cache[sprite][0/1]: source line numbers currently held in
 * v->sr_rows[sprite][0/1]; -1 means empty. */
4930  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
4931  uint8_t* src_h[2][2];
4932  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
4933  int ysub[2];
4934  MpegEncContext *s = &v->s;
4935 
/* Clamp per-sprite start offsets and per-pixel advances (16.16) so
 * sampling never leaves the sprite. */
4936  for (i = 0; i < 2; i++) {
4937  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
4938  xadv[i] = sd->coefs[i][0];
4939  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
4940  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
4941 
4942  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
4943  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
4944  }
4945  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
4946 
4947  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
4948  int width = v->output_width>>!!plane;
4949 
4950  for (row = 0; row < v->output_height>>!!plane; row++) {
4951  uint8_t *dst = v->sprite_output_frame.data[plane] +
4952  v->sprite_output_frame.linesize[plane] * row;
4953 
4954  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
/* Sprite 0 samples the current picture, sprite 1 the last one. */
4955  uint8_t *iplane = s->current_picture.f.data[plane];
4956  int iline = s->current_picture.f.linesize[plane];
4957  int ycoord = yoff[sprite] + yadv[sprite] * row;
4958  int yline = ycoord >> 16;
4959  int next_line;
/* ysub: fractional vertical position between yline and yline+1. */
4960  ysub[sprite] = ycoord & 0xFFFF;
4961  if (sprite) {
4962  iplane = s->last_picture.f.data[plane];
4963  iline = s->last_picture.f.linesize[plane];
4964  }
4965  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
/* Fast path: integral x offset and unit advance -> sample the
 * source rows directly, no horizontal resampling needed. */
4966  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
4967  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
4968  if (ysub[sprite])
4969  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
4970  } else {
/* Slow path: horizontally resample into sr_rows, reusing or
 * swapping cached rows when possible. */
4971  if (sr_cache[sprite][0] != yline) {
4972  if (sr_cache[sprite][1] == yline) {
4973  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
4974  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
4975  } else {
4976  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
4977  sr_cache[sprite][0] = yline;
4978  }
4979  }
4980  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
4981  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
4982  iplane + next_line, xoff[sprite],
4983  xadv[sprite], width);
4984  sr_cache[sprite][1] = yline + 1;
4985  }
4986  src_h[sprite][0] = v->sr_rows[sprite][0];
4987  src_h[sprite][1] = v->sr_rows[sprite][1];
4988  }
4989  }
4990 
/* Vertical interpolation and (for two sprites) alpha blending,
 * choosing the cheapest DSP routine for the sub-position cases. */
4991  if (!v->two_sprites) {
4992  if (ysub[0]) {
4993  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
4994  } else {
4995  memcpy(dst, src_h[0][0], width);
4996  }
4997  } else {
4998  if (ysub[0] && ysub[1]) {
4999  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5000  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5001  } else if (ysub[0]) {
5002  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5003  src_h[1][0], alpha, width);
5004  } else if (ysub[1]) {
5005  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5006  src_h[0][0], (1<<16)-1-alpha, width);
5007  } else {
5008  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5009  }
5010  }
5011  }
5012 
/* After the luma plane, halve the offsets for the chroma planes. */
5013  if (!plane) {
5014  for (i = 0; i < 2; i++) {
5015  xoff[i] >>= 1;
5016  yoff[i] >>= 1;
5017  }
5018  }
5019 
5020  }
5021 }
5022 
5023 
5024 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5025 {
5026  MpegEncContext *s = &v->s;
5027  AVCodecContext *avctx = s->avctx;
5028  SpriteData sd;
5029 
/* Decode one composited sprite frame for the WMV3IMAGE/VC1IMAGE codecs:
 * parse the sprite transform data from the bitstream, check that the
 * required source sprite(s) are available, obtain an output buffer and
 * composite into it.  Returns 0 on success, -1 on error. */
5030  vc1_parse_sprites(v, gb, &sd);
5031 
/* current_picture holds the most recently decoded sprite; without it
 * there is nothing to composite. */
5032  if (!s->current_picture.f.data[0]) {
5033  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5034  return -1;
5035  }
5036 
/* Two-sprite blending also needs the previous sprite (last_picture);
 * degrade to single-sprite mode instead of failing outright. */
5037  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5038  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5039  v->two_sprites = 0;
5040  }
5041 
/* Release the previously composited frame before requesting a new buffer. */
5042  if (v->sprite_output_frame.data[0])
5043  avctx->release_buffer(avctx, &v->sprite_output_frame);
5044 
/* NOTE(review): doxygen source lines 5045-5046 are missing from this
 * extraction (presumably output-frame setup preceding get_buffer) --
 * confirm against the original vc1dec.c. */
5047  if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
5048  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5049  return -1;
5050  }
5051 
5052  vc1_draw_sprites(v, &sd);
5053 
5054  return 0;
5055 }
5056 
5057 static void vc1_sprite_flush(AVCodecContext *avctx)
5058 {
5059  VC1Context *v = avctx->priv_data;
5060  MpegEncContext *s = &v->s;
5061  AVFrame *f = &s->current_picture.f;
5062  int plane, i;
5063 
5064  /* Windows Media Image codecs have a convergence interval of two keyframes.
5065  Since we can't enforce it, clear to black the missing sprite. This is
5066  wrong but it looks better than doing nothing. */
5067 
5068  if (f->data[0])
5069  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5070  for (i = 0; i < v->sprite_height>>!!plane; i++)
5071  memset(f->data[plane] + i * f->linesize[plane],
5072  plane ? 128 : 0, f->linesize[plane]);
5073 }
5074 
5075 #endif
5076 
5078 {
5079  MpegEncContext *s = &v->s;
5080  int i;
5081 
5082  /* Allocate mb bitplanes */
5087  v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5089 
5090  v->n_allocated_blks = s->mb_width + 2;
5091  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5092  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5093  v->cbp = v->cbp_base + s->mb_stride;
5094  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5095  v->ttblk = v->ttblk_base + s->mb_stride;
5096  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5097  v->is_intra = v->is_intra_base + s->mb_stride;
5098  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5099  v->luma_mv = v->luma_mv_base + s->mb_stride;
5100 
5101  /* allocate block type info in that way so it could be used with s->block_index[] */
5102  v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5103  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5104  v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5105  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5106 
5107  /* allocate memory to store block level MV info */
5108  v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5109  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5110  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5111  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5112  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5113  v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5114  v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5115  v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5116  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5117  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5118  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5119 
5120  /* Init coded blocks info */
5121  if (v->profile == PROFILE_ADVANCED) {
5122 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5123 // return -1;
5124 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5125 // return -1;
5126  }
5127 
5128  ff_intrax8_common_init(&v->x8,s);
5129 
5131  for (i = 0; i < 4; i++)
5132  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5133  }
5134 
5135  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5136  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5137  !v->mb_type_base) {
5140  av_freep(&v->acpred_plane);
5142  av_freep(&v->block);
5143  av_freep(&v->cbp_base);
5144  av_freep(&v->ttblk_base);
5145  av_freep(&v->is_intra_base);
5146  av_freep(&v->luma_mv_base);
5147  av_freep(&v->mb_type_base);
5148  return AVERROR(ENOMEM);
5149  }
5150 
5151  return 0;
5152 }
5153 
5155 {
5156  int i;
5157  for (i = 0; i < 64; i++) {
5158 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5159  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5160  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5161  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5162  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5164  }
5165  v->left_blk_sh = 0;
5166  v->top_blk_sh = 3;
5167 }
5168 
5174 {
5175  VC1Context *v = avctx->priv_data;
5176  MpegEncContext *s = &v->s;
5177  GetBitContext gb;
5178 
5179  /* save the container output size for WMImage */
5180  v->output_width = avctx->width;
5181  v->output_height = avctx->height;
5182 
5183  if (!avctx->extradata_size || !avctx->extradata)
5184  return -1;
5185  if (!(avctx->flags & CODEC_FLAG_GRAY))
5186  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5187  else
5188  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5189  avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5190  v->s.avctx = avctx;
5191  avctx->flags |= CODEC_FLAG_EMU_EDGE;
5192  v->s.flags |= CODEC_FLAG_EMU_EDGE;
5193 
5194  if (avctx->idct_algo == FF_IDCT_AUTO) {
5195  avctx->idct_algo = FF_IDCT_WMV2;
5196  }
5197 
5198  if (ff_vc1_init_common(v) < 0)
5199  return -1;
5200  ff_vc1dsp_init(&v->vc1dsp);
5201 
5202  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5203  int count = 0;
5204 
5205  // looks like WMV3 has a sequence header stored in the extradata
5206  // advanced sequence header may be before the first frame
5207  // the last byte of the extradata is a version number, 1 for the
5208  // samples we can decode
5209 
5210  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5211 
5212  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5213  return -1;
5214 
5215  count = avctx->extradata_size*8 - get_bits_count(&gb);
5216  if (count > 0) {
5217  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5218  count, get_bits(&gb, count));
5219  } else if (count < 0) {
5220  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5221  }
5222  } else { // VC1/WVC1/WVP2
5223  const uint8_t *start = avctx->extradata;
5224  uint8_t *end = avctx->extradata + avctx->extradata_size;
5225  const uint8_t *next;
5226  int size, buf2_size;
5227  uint8_t *buf2 = NULL;
5228  int seq_initialized = 0, ep_initialized = 0;
5229 
5230  if (avctx->extradata_size < 16) {
5231  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5232  return -1;
5233  }
5234 
5236  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5237  next = start;
5238  for (; next < end; start = next) {
5239  next = find_next_marker(start + 4, end);
5240  size = next - start - 4;
5241  if (size <= 0)
5242  continue;
5243  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5244  init_get_bits(&gb, buf2, buf2_size * 8);
5245  switch (AV_RB32(start)) {
5246  case VC1_CODE_SEQHDR:
5247  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5248  av_free(buf2);
5249  return -1;
5250  }
5251  seq_initialized = 1;
5252  break;
5253  case VC1_CODE_ENTRYPOINT:
5254  if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5255  av_free(buf2);
5256  return -1;
5257  }
5258  ep_initialized = 1;
5259  break;
5260  }
5261  }
5262  av_free(buf2);
5263  if (!seq_initialized || !ep_initialized) {
5264  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5265  return -1;
5266  }
5267  v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5268  }
5269 
5270  avctx->profile = v->profile;
5271  if (v->profile == PROFILE_ADVANCED)
5272  avctx->level = v->level;
5273 
5274  avctx->has_b_frames = !!avctx->max_b_frames;
5275 
5276  s->mb_width = (avctx->coded_width + 15) >> 4;
5277  s->mb_height = (avctx->coded_height + 15) >> 4;
5278 
5279  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5281  } else {
5282  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5283  v->left_blk_sh = 3;
5284  v->top_blk_sh = 0;
5285  }
5286 
5287  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5288  v->sprite_width = avctx->coded_width;
5289  v->sprite_height = avctx->coded_height;
5290 
5291  avctx->coded_width = avctx->width = v->output_width;
5292  avctx->coded_height = avctx->height = v->output_height;
5293 
5294  // prevent 16.16 overflows
5295  if (v->sprite_width > 1 << 14 ||
5296  v->sprite_height > 1 << 14 ||
5297  v->output_width > 1 << 14 ||
5298  v->output_height > 1 << 14) return -1;
5299  }
5300  return 0;
5301 }
5302 
5307 {
5308  VC1Context *v = avctx->priv_data;
5309  int i;
5310 
5311  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5312  && v->sprite_output_frame.data[0])
5313  avctx->release_buffer(avctx, &v->sprite_output_frame);
5314  for (i = 0; i < 4; i++)
5315  av_freep(&v->sr_rows[i >> 1][i & 1]);
5316  av_freep(&v->hrd_rate);
5317  av_freep(&v->hrd_buffer);
5318  ff_MPV_common_end(&v->s);
5322  av_freep(&v->fieldtx_plane);
5323  av_freep(&v->acpred_plane);
5325  av_freep(&v->mb_type_base);
5327  av_freep(&v->mv_f_base);
5328  av_freep(&v->mv_f_last_base);
5329  av_freep(&v->mv_f_next_base);
5330  av_freep(&v->block);
5331  av_freep(&v->cbp_base);
5332  av_freep(&v->ttblk_base);
5333  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5334  av_freep(&v->luma_mv_base);
5336  return 0;
5337 }
5338 
5339 
5343 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5344  int *got_frame, AVPacket *avpkt)
5345 {
5346  const uint8_t *buf = avpkt->data;
5347  int buf_size = avpkt->size, n_slices = 0, i;
5348  VC1Context *v = avctx->priv_data;
5349  MpegEncContext *s = &v->s;
5350  AVFrame *pict = data;
5351  uint8_t *buf2 = NULL;
5352  const uint8_t *buf_start = buf;
5353  int mb_height, n_slices1;
5354  struct {
5355  uint8_t *buf;
5356  GetBitContext gb;
5357  int mby_start;
5358  } *slices = NULL, *tmp;
5359 
5360  /* no supplementary picture */
5361  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5362  /* special case for last picture */
5363  if (s->low_delay == 0 && s->next_picture_ptr) {
5364  *pict = s->next_picture_ptr->f;
5365  s->next_picture_ptr = NULL;
5366 
5367  *got_frame = 1;
5368  }
5369 
5370  return 0;
5371  }
5372 
5374  if (v->profile < PROFILE_ADVANCED)
5375  avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5376  else
5377  avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5378  }
5379 
5380  //for advanced profile we may need to parse and unescape data
5381  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5382  int buf_size2 = 0;
5383  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5384 
5385  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5386  const uint8_t *start, *end, *next;
5387  int size;
5388 
5389  next = buf;
5390  for (start = buf, end = buf + buf_size; next < end; start = next) {
5391  next = find_next_marker(start + 4, end);
5392  size = next - start - 4;
5393  if (size <= 0) continue;
5394  switch (AV_RB32(start)) {
5395  case VC1_CODE_FRAME:
5396  if (avctx->hwaccel ||
5398  buf_start = start;
5399  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5400  break;
5401  case VC1_CODE_FIELD: {
5402  int buf_size3;
5403  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5404  if (!tmp)
5405  goto err;
5406  slices = tmp;
5407  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5408  if (!slices[n_slices].buf)
5409  goto err;
5410  buf_size3 = vc1_unescape_buffer(start + 4, size,
5411  slices[n_slices].buf);
5412  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5413  buf_size3 << 3);
5414  /* assuming that the field marker is at the exact middle,
5415  hope it's correct */
5416  slices[n_slices].mby_start = s->mb_height >> 1;
5417  n_slices1 = n_slices - 1; // index of the last slice of the first field
5418  n_slices++;
5419  break;
5420  }
5421  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5422  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5423  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5424  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5425  break;
5426  case VC1_CODE_SLICE: {
5427  int buf_size3;
5428  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5429  if (!tmp)
5430  goto err;
5431  slices = tmp;
5432  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5433  if (!slices[n_slices].buf)
5434  goto err;
5435  buf_size3 = vc1_unescape_buffer(start + 4, size,
5436  slices[n_slices].buf);
5437  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5438  buf_size3 << 3);
5439  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5440  n_slices++;
5441  break;
5442  }
5443  }
5444  }
5445  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5446  const uint8_t *divider;
5447  int buf_size3;
5448 
5449  divider = find_next_marker(buf, buf + buf_size);
5450  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5451  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5452  goto err;
5453  } else { // found field marker, unescape second field
5454  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5455  if (!tmp)
5456  goto err;
5457  slices = tmp;
5458  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5459  if (!slices[n_slices].buf)
5460  goto err;
5461  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5462  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5463  buf_size3 << 3);
5464  slices[n_slices].mby_start = s->mb_height >> 1;
5465  n_slices1 = n_slices - 1;
5466  n_slices++;
5467  }
5468  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5469  } else {
5470  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5471  }
5472  init_get_bits(&s->gb, buf2, buf_size2*8);
5473  } else
5474  init_get_bits(&s->gb, buf, buf_size*8);
5475 
5476  if (v->res_sprite) {
5477  v->new_sprite = !get_bits1(&s->gb);
5478  v->two_sprites = get_bits1(&s->gb);
5479  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5480  we're using the sprite compositor. These are intentionally kept separate
5481  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5482  the vc1 one for WVP2 */
5483  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5484  if (v->new_sprite) {
5485  // switch AVCodecContext parameters to those of the sprites
5486  avctx->width = avctx->coded_width = v->sprite_width;
5487  avctx->height = avctx->coded_height = v->sprite_height;
5488  } else {
5489  goto image;
5490  }
5491  }
5492  }
5493 
5494  if (s->context_initialized &&
5495  (s->width != avctx->coded_width ||
5496  s->height != avctx->coded_height)) {
5497  ff_vc1_decode_end(avctx);
5498  }
5499 
5500  if (!s->context_initialized) {
5501  if (ff_msmpeg4_decode_init(avctx) < 0)
5502  goto err;
5503  if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5504  ff_MPV_common_end(s);
5505  goto err;
5506  }
5507 
5508  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5509 
5510  if (v->profile == PROFILE_ADVANCED) {
5511  s->h_edge_pos = avctx->coded_width;
5512  s->v_edge_pos = avctx->coded_height;
5513  }
5514  }
5515 
5516  /* We need to set current_picture_ptr before reading the header,
5517  * otherwise we cannot store anything in there. */
5518  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5519  int i = ff_find_unused_picture(s, 0);
5520  if (i < 0)
5521  goto err;
5522  s->current_picture_ptr = &s->picture[i];
5523  }
5524 
5525  // do parse frame header
5526  v->pic_header_flag = 0;
5527  if (v->profile < PROFILE_ADVANCED) {
5528  if (ff_vc1_parse_frame_header(v, &s->gb) == -1) {
5529  goto err;
5530  }
5531  } else {
5532  if (ff_vc1_parse_frame_header_adv(v, &s->gb) == -1) {
5533  goto err;
5534  }
5535  }
5536 
5537  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5538  && s->pict_type != AV_PICTURE_TYPE_I) {
5539  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5540  goto err;
5541  }
5542 
5543  // process pulldown flags
5545  // Pulldown flags are only valid when 'broadcast' has been set.
5546  // So ticks_per_frame will be 2
5547  if (v->rff) {
5548  // repeat field
5550  } else if (v->rptfrm) {
5551  // repeat frames
5552  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5553  }
5554 
5555  // for skipping the frame
5558 
5559  /* skip B-frames if we don't have reference frames */
5560  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5561  goto err;
5562  }
5563  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5564  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5565  avctx->skip_frame >= AVDISCARD_ALL) {
5566  goto end;
5567  }
5568 
5569  if (s->next_p_frame_damaged) {
5570  if (s->pict_type == AV_PICTURE_TYPE_B)
5571  goto end;
5572  else
5573  s->next_p_frame_damaged = 0;
5574  }
5575 
5576  if (ff_MPV_frame_start(s, avctx) < 0) {
5577  goto err;
5578  }
5579 
5582 
5585  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5586  else if (avctx->hwaccel) {
5587  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5588  goto err;
5589  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5590  goto err;
5591  if (avctx->hwaccel->end_frame(avctx) < 0)
5592  goto err;
5593  } else {
5594  int header_ret = 0;
5595 
5596  ff_er_frame_start(s);
5597 
5598  v->bits = buf_size * 8;
5599  v->end_mb_x = s->mb_width;
5600  if (v->field_mode) {
5601  uint8_t *tmp[2];
5602  s->current_picture.f.linesize[0] <<= 1;
5603  s->current_picture.f.linesize[1] <<= 1;
5604  s->current_picture.f.linesize[2] <<= 1;
5605  s->linesize <<= 1;
5606  s->uvlinesize <<= 1;
5607  tmp[0] = v->mv_f_last[0];
5608  tmp[1] = v->mv_f_last[1];
5609  v->mv_f_last[0] = v->mv_f_next[0];
5610  v->mv_f_last[1] = v->mv_f_next[1];
5611  v->mv_f_next[0] = v->mv_f[0];
5612  v->mv_f_next[1] = v->mv_f[1];
5613  v->mv_f[0] = tmp[0];
5614  v->mv_f[1] = tmp[1];
5615  }
5616  mb_height = s->mb_height >> v->field_mode;
5617 
5618  if (!mb_height) {
5619  av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
5620  goto err;
5621  }
5622 
5623  for (i = 0; i <= n_slices; i++) {
5624  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5625  if (v->field_mode <= 0) {
5626  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
5627  "picture boundary (%d >= %d)\n", i,
5628  slices[i - 1].mby_start, mb_height);
5629  continue;
5630  }
5631  v->second_field = 1;
5632  v->blocks_off = s->mb_width * s->mb_height << 1;
5633  v->mb_off = s->mb_stride * s->mb_height >> 1;
5634  } else {
5635  v->second_field = 0;
5636  v->blocks_off = 0;
5637  v->mb_off = 0;
5638  }
5639  if (i) {
5640  v->pic_header_flag = 0;
5641  if (v->field_mode && i == n_slices1 + 2) {
5642  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
5643  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
5644  continue;
5645  }
5646  } else if (get_bits1(&s->gb)) {
5647  v->pic_header_flag = 1;
5648  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
5649  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
5650  continue;
5651  }
5652  }
5653  }
5654  if (header_ret < 0)
5655  continue;
5656  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5657  if (!v->field_mode || v->second_field)
5658  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5659  else
5660  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5662  if (i != n_slices)
5663  s->gb = slices[i].gb;
5664  }
5665  if (v->field_mode) {
5666  v->second_field = 0;
5667  if (s->pict_type == AV_PICTURE_TYPE_B) {
5668  memcpy(v->mv_f_base, v->mv_f_next_base,
5669  2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5670  }
5671  s->current_picture.f.linesize[0] >>= 1;
5672  s->current_picture.f.linesize[1] >>= 1;
5673  s->current_picture.f.linesize[2] >>= 1;
5674  s->linesize >>= 1;
5675  s->uvlinesize >>= 1;
5676  }
5677  av_dlog(s->avctx, "Consumed %i/%i bits\n",
5678  get_bits_count(&s->gb), s->gb.size_in_bits);
5679 // if (get_bits_count(&s->gb) > buf_size * 8)
5680 // return -1;
5681  ff_er_frame_end(s);
5682  }
5683 
5684  ff_MPV_frame_end(s);
5685 
5686  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5687 image:
5688  avctx->width = avctx->coded_width = v->output_width;
5689  avctx->height = avctx->coded_height = v->output_height;
5690  if (avctx->skip_frame >= AVDISCARD_NONREF)
5691  goto end;
5692 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5693  if (vc1_decode_sprites(v, &s->gb))
5694  goto err;
5695 #endif
5696  *pict = v->sprite_output_frame;
5697  *got_frame = 1;
5698  } else {
5699  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5700  *pict = s->current_picture_ptr->f;
5701  } else if (s->last_picture_ptr != NULL) {
5702  *pict = s->last_picture_ptr->f;
5703  }
5704  if (s->last_picture_ptr || s->low_delay) {
5705  *got_frame = 1;
5706  ff_print_debug_info(s, pict);
5707  }
5708  }
5709 
5710 end:
5711  av_free(buf2);
5712  for (i = 0; i < n_slices; i++)
5713  av_free(slices[i].buf);
5714  av_free(slices);
5715  return buf_size;
5716 
5717 err:
5718  av_free(buf2);
5719  for (i = 0; i < n_slices; i++)
5720  av_free(slices[i].buf);
5721  av_free(slices);
5722  return -1;
5723 }
5724 
5725 
/* Human-readable VC-1 profile names, exported through the decoders'
 * AVCodec.profiles field below; the list is FF_PROFILE_UNKNOWN-terminated. */
5726 static const AVProfile profiles[] = {
5727  { FF_PROFILE_VC1_SIMPLE, "Simple" },
5728  { FF_PROFILE_VC1_MAIN, "Main" },
5729  { FF_PROFILE_VC1_COMPLEX, "Complex" },
5730  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5731  { FF_PROFILE_UNKNOWN },
5732 };
5733 
5735  .name = "vc1",
5736  .type = AVMEDIA_TYPE_VIDEO,
5737  .id = AV_CODEC_ID_VC1,
5738  .priv_data_size = sizeof(VC1Context),
5739  .init = vc1_decode_init,
5742  .flush = ff_mpeg_flush,
5743  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5744  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5745  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5746  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5747 };
5748 
5749 #if CONFIG_WMV3_DECODER
5750 AVCodec ff_wmv3_decoder = {
5751  .name = "wmv3",
5752  .type = AVMEDIA_TYPE_VIDEO,
5753  .id = AV_CODEC_ID_WMV3,
5754  .priv_data_size = sizeof(VC1Context),
5755  .init = vc1_decode_init,
5758  .flush = ff_mpeg_flush,
5759  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5760  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5761  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5762  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5763 };
5764 #endif
5765 
5766 #if CONFIG_WMV3_VDPAU_DECODER
5767 AVCodec ff_wmv3_vdpau_decoder = {
5768  .name = "wmv3_vdpau",
5769  .type = AVMEDIA_TYPE_VIDEO,
5770  .id = AV_CODEC_ID_WMV3,
5771  .priv_data_size = sizeof(VC1Context),
5772  .init = vc1_decode_init,
5776  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5777  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
5778  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5779 };
5780 #endif
5781 
5782 #if CONFIG_VC1_VDPAU_DECODER
5783 AVCodec ff_vc1_vdpau_decoder = {
5784  .name = "vc1_vdpau",
5785  .type = AVMEDIA_TYPE_VIDEO,
5786  .id = AV_CODEC_ID_VC1,
5787  .priv_data_size = sizeof(VC1Context),
5788  .init = vc1_decode_init,
5792  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5793  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
5794  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5795 };
5796 #endif
5797 
5798 #if CONFIG_WMV3IMAGE_DECODER
5799 AVCodec ff_wmv3image_decoder = {
5800  .name = "wmv3image",
5801  .type = AVMEDIA_TYPE_VIDEO,
5802  .id = AV_CODEC_ID_WMV3IMAGE,
5803  .priv_data_size = sizeof(VC1Context),
5804  .init = vc1_decode_init,
5807  .capabilities = CODEC_CAP_DR1,
5808  .flush = vc1_sprite_flush,
5809  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5810  .pix_fmts = ff_pixfmt_list_420
5811 };
5812 #endif
5813 
5814 #if CONFIG_VC1IMAGE_DECODER
5815 AVCodec ff_vc1image_decoder = {
5816  .name = "vc1image",
5817  .type = AVMEDIA_TYPE_VIDEO,
5818  .id = AV_CODEC_ID_VC1IMAGE,
5819  .priv_data_size = sizeof(VC1Context),
5820  .init = vc1_decode_init,
5823  .capabilities = CODEC_CAP_DR1,
5824  .flush = vc1_sprite_flush,
5825  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5826  .pix_fmts = ff_pixfmt_list_420
5827 };
5828 #endif
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
Definition: vc1dec.c:250
in the bitstream is reported as 00b
Definition: vc1.h:172
enum AVPixelFormat ff_hwaccel_pixfmt_list_420[]
Definition: mpegvideo.c:133
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
Definition: vc1data.c:34
op_pixels_func avg_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:58
int use_ic
use intensity compensation in B-frames
Definition: vc1.h:300
#define VC1_TTBLK_VLC_BITS
Definition: vc1data.c:126
void(* vc1_h_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:44
const struct AVCodec * codec
Definition: avcodec.h:1348
int topleft_blk_idx
Definition: vc1.h:389
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:61
discard all frames except keyframes
Definition: avcodec.h:535
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2458
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: dsputil.h:259
#define VC1_IF_MBMODE_VLC_BITS
Definition: vc1data.c:145
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3106
int p_frame_skipped
Definition: vc1.h:384
Imode
Imode types.
Definition: vc1.c:54
uint8_t luty[256]
Definition: vc1.h:299
The VC1 Context.
Definition: vc1.h:181
int size
int esc3_level_length
Definition: mpegvideo.h:618
This structure describes decoded (raw) audio or video data.
Definition: avcodec.h:989
VLC ff_vc1_ttblk_vlc[3]
Definition: vc1data.c:127
#define VC1_ICBPCY_VLC_BITS
Definition: vc1data.c:120
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:37
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
Definition: vc1dec.c:3454
int k_x
Number of bits for MVs (depends on MV range)
Definition: vc1.h:241
int reffield
if numref = 0 (1 reference) then reffield decides which
Definition: vc1.h:363
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:286
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:324
uint8_t * mv_f_base
Definition: vc1.h:354
int coded_width
Bitstream width / height, may be different from width/height.
Definition: avcodec.h:1515
int mv_type_is_raw
mv type mb plane is not coded
Definition: vc1.h:295
int buffer_hints
codec suggestion on buffer type if != 0
Definition: avcodec.h:1253
void(* release_buffer)(struct AVCodecContext *c, AVFrame *pic)
Called to release buffers which were allocated with get_buffer.
Definition: avcodec.h:2259
#define B
Definition: dsputil.c:1897
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
Definition: vc1dec.c:1315
uint8_t dmvrange
Frame decoding info for interlaced picture.
Definition: vc1.h:338
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:237
#define AC_VLC_BITS
Definition: intrax8.c:35
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1284
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
Definition: vc1acdata.h:34
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:287
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1588
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be contiguous
Definition: mpegvideo.h:330
static const int vc1_last_decode_table[AC_MODES]
Definition: vc1acdata.h:30
int tt_index
Index for Transform Type tables (to decode TTMB)
Definition: vc1.h:291
static void vc1_decode_p_blocks(VC1Context *v)
Definition: vc1dec.c:4605
static void vc1_put_signed_blocks_clamped(VC1Context *v)
Definition: vc1dec.c:78
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:251
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
Definition: vc1dec.c:1080
#define VC1_2REF_MVDATA_VLC_BITS
Definition: vc1data.c:140
uint8_t * mv_f_last_base
Definition: vc1.h:355
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:68
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: avcodec.h:1225
static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
Definition: vc1dec.c:2931
int size
Definition: avcodec.h:916
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
Definition: vc1.h:305
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
Definition: msmpeg4.c:588
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1533
int frfd
Definition: vc1.h:372
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
Definition: vc1.h:245
static void vc1_decode_b_blocks(VC1Context *v)
Definition: vc1dec.c:4680
#define wrap(func)
Definition: w64xmmtest.h:70
mpegvideo header.
int top_blk_idx
Definition: vc1.h:389
IntraX8Context x8
Definition: vc1.h:183
VLC * imv_vlc
Definition: vc1.h:345
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t * mb_type_base
Definition: vc1.h:270
discard all
Definition: avcodec.h:536
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
Definition: vc1.h:354
int sprite_height
Definition: vc1.h:380
uint8_t run
Definition: svq3.c:124
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:228
int end_mb_x
Horizontal macroblock limit (used only by mss2)
Definition: vc1.h:397
struct VC1Context VC1Context
The VC1 Context.
int profile
profile
Definition: avcodec.h:2815
AVCodec.
Definition: avcodec.h:2960
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:49
void ff_vc1_decode_blocks(VC1Context *v)
Definition: vc1dec.c:4763
int block_wrap[6]
Definition: mpegvideo.h:434
uint8_t rff
Definition: vc1.h:314
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
Definition: vc1dec.c:2246
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:151
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
Definition: vc1dec.c:2454
enum AVDiscard skip_frame
Definition: avcodec.h:2907
int bits
Definition: vc1.h:186
int range_x
Definition: vc1.h:243
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:122
static void vc1_apply_p_loop_filter(VC1Context *v)
Definition: vc1dec.c:3427
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
Definition: vc1data.c:1121
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2622
int esc3_run_length
Definition: mpegvideo.h:619
int refdist
distance of the current picture from reference
Definition: vc1.h:360
uint8_t * acpred_plane
AC prediction flags bitplane.
Definition: vc1.h:324
VC-1 tables.
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:50
int bi_type
Definition: vc1.h:385
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:228
#define MB_TYPE_INTRA
Definition: mpegvideo.h:104
static const AVProfile profiles[]
Definition: vc1dec.c:5726
static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2556
uint8_t bits
Definition: crc.c:31
uint8_t
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:47
uint16_t * hrd_rate
Definition: vc1.h:329
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
Definition: vc1dsp.h:70
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:40
#define DC_VLC_BITS
Definition: vc1dec.c:46
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
Definition: dsputil.h:322
int left_blk_idx
Definition: vc1.h:389
static void vc1_mc_4mv_chroma4(VC1Context *v)
Do motion compensation for 4-MV field chroma macroblock (both U and V)
Definition: vc1dec.c:943
#define AV_RB32
Definition: intreadwrite.h:130
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:41
int interlace
Progressive/interlaced (RPTFTM syntax element)
Definition: vc1.h:208
int y_ac_table_index
Luma index from AC2FRM element.
Definition: vc1.h:261
#define b
Definition: input.c:52
int second_field
Definition: vc1.h:359
int n_allocated_blks
Definition: vc1.h:389
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:198
void(* vc1_inv_trans_8x8)(DCTELEM *b)
Definition: vc1dsp.h:35
int c_ac_table_index
AC coding set indexes.
Definition: vc1.h:260
const int ff_vc1_ac_sizes[AC_MODES]
Definition: vc1data.c:1133
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1454
int ttfrm
Transform type info present at frame level.
Definition: vc1.h:263
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:313
uint8_t lutuv[256]
lookup tables used for intensity compensation
Definition: vc1.h:299
int codingset2
index of current table set from 11.8 to use for chroma block decoding
Definition: vc1.h:267
int16_t bfraction
Relative position % anchors=> how to scale MVs.
Definition: vc1.h:278
int16_t((* luma_mv)[2]
Definition: vc1.h:392
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: dsputil.h:271
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
Definition: vc1.h:225
const char data[16]
Definition: mxf.c:66
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
Definition: vc1dec.c:182
MSMPEG4 data tables.
#define CONFIG_VC1_VDPAU_DECODER
Definition: config.h:511
uint8_t * data
Definition: avcodec.h:915
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:192
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:50
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1187
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:39
uint8_t * forward_mb_plane
bitplane for "forward" MBs
Definition: vc1.h:294
WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstrea...
Definition: pixfmt.h:106
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:247
enum AVPixelFormat ff_pixfmt_list_420[]
Definition: mpegvideo.c:128
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
Definition: vc1.c:294
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
Definition: vc1dec.c:2004
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1573
int fieldtx_is_raw
Definition: vc1.h:351
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
Do motion compensation for 4-MV macroblock - luminance block.
Definition: vc1dec.c:548
uint8_t * over_flags_plane
Overflags bitplane.
Definition: vc1.h:326
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
Definition: vc1dec.c:4024
uint8_t fourmvbp
Definition: vc1.h:349
const int8_t ff_vc1_adv_interlaced_4x8_zz[32]
Definition: vc1data.c:1065
int range_y
MV range.
Definition: vc1.h:243
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:823
static int init(AVCodecParserContext *s)
Definition: h264_parser.c:335
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: dsputil.h:312
uint8_t ttmbf
Transform type flag.
Definition: vc1.h:264
Definition: vc1.h:142
int k_y
Number of bits for MVs (depends on MV range)
Definition: vc1.h:242
void(* add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:205
#define transpose(x)
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:547
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:323
enum AVCodecID id
Definition: avcodec.h:2974
uint8_t twomvbp
Definition: vc1.h:348
void(* put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:203
int dmb_is_raw
direct mb plane is raw
Definition: vc1.h:296
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
Definition: vc1dec.c:2312
#define VC1_CBPCY_P_VLC_BITS
Definition: vc1data.c:118
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1634
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:139
int overlap
overlapped transforms in use
Definition: vc1.h:232
in the bitstream is reported as 11b
Definition: vc1.h:174
void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
Definition: avcodec.h:1132
const int8_t ff_vc1_simple_progressive_4x4_zz[16]
Definition: vc1data.c:1022
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:42
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: dsputil.h:313
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
Definition: vc1dec.c:330
#define ER_MB_ERROR
Definition: mpegvideo.h:502
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:88
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
Definition: vc1dec.c:5173
#define GET_MQUANT()
Get macroblock-level quantizer scale.
Definition: vc1dec.c:1038
AVFrame sprite_output_frame
Definition: vc1.h:379
int capabilities
Codec capabilities.
Definition: avcodec.h:2979
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:337
static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2719
void(* vc1_v_s_overlap)(DCTELEM *top, DCTELEM *bottom)
Definition: vc1dsp.h:45
#define t1
Definition: regdef.h:29
uint8_t * mv_f_next_base
Definition: vc1.h:356
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1434
static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
Definition: vc1dec.c:3141
VLC * mbmode_vlc
Definition: vc1.h:344
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:146
const char * name
Name of the codec implementation.
Definition: avcodec.h:2967
#define IS_MARKER(state, i, buf, buf_size)
Definition: dca_parser.c:39
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:557
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:570
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: dsputil.h:283
GetBitContext gb
Definition: mpegvideo.h:626
void(* clear_block)(DCTELEM *block)
Definition: dsputil.h:218
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:51
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
Definition: vc1dec.c:1220
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
Definition: vc1.h:247
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:198
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
h264 Chroma MC
Definition: dsputil.h:321
int off
Definition: dsputil_bfin.c:28
int a_avail
Definition: vc1.h:269
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
Definition: vc1.h:353
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
Definition: vc1dec.c:2490
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2490
Definition: vf_drawbox.c:36
const int8_t ff_vc1_adv_interlaced_4x4_zz[16]
Definition: vc1data.c:1076
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:822
#define B_FRACTION_DEN
Definition: vc1data.h:99
VLC ff_vc1_ttmb_vlc[3]
Definition: vc1data.c:115
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
Definition: vc1dec.c:1336
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
int cur_field_type
0: top, 1: bottom
Definition: vc1.h:367
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
Definition: msmpeg4data.c:1831
VLC * twomvbp_vlc
Definition: vc1.h:346
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
Definition: vc1.h:248
AVCodec ff_vc1_decoder
Definition: vc1dec.c:5734
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2981
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
Definition: vc1dec.c:1257
static DCTELEM block[64]
Definition: dct-test.c:169
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
Definition: avcodec.h:1065
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
Definition: vc1dec.c:5306
int x8_type
Definition: vc1.h:386
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
Definition: vc1dec.c:5154
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:509
uint8_t * blk_mv_type_base
Definition: vc1.h:353
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
Definition: intrax8.c:693
int field_mode
1 for interlaced field pictures
Definition: vc1.h:357
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
Definition: intrax8.c:711
int width
picture width / height.
Definition: avcodec.h:1508
int8_t zzi_8x8[64]
Definition: vc1.h:352
#define VC1_SUBBLKPAT_VLC_BITS
Definition: vc1data.c:128
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:332
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:2661
uint8_t mv_mode
Frame decoding info for all profiles.
Definition: vc1.h:239
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:317
int fourmvswitch
Definition: vc1.h:339
int mb_off
Definition: vc1.h:369
static void vc1_decode_skip_blocks(VC1Context *v)
Definition: vc1dec.c:4741
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3303
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Get a buffer for a frame.
Definition: utils.c:464
int size_in_bits
Definition: get_bits.h:55
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
Definition: vc1dec.c:5077
void ff_er_frame_end(MpegEncContext *s)
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:573
static const int offset_table[6]
Definition: vc1dec.c:3301
static int median4(int a, int b, int c, int d)
Definition: vc1dec.c:535
int level
level
Definition: avcodec.h:2885
static int vc1_decode_p_mb_intfr(VC1Context *v)
Definition: vc1dec.c:3693
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:515
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:261
MotionEstContext me
Definition: mpegvideo.h:405
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
Definition: vc1dec.c:1118
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3367
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
Definition: vc1data.c:1097
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1370
uint32_t * cbp
Definition: vc1.h:390
int left_blk_sh
Definition: vc1.h:246
int16_t(* luma_mv_base)[2]
Definition: vc1.h:392
uint8_t * fieldtx_plane
Definition: vc1.h:350
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:433
int * ttblk_base
Definition: vc1.h:265
VLC * cbpcy_vlc
CBPCY VLC table.
Definition: vc1.h:290
static int decode210(GetBitContext *gb)
Definition: get_bits.h:539
static const float pred[4]
Definition: siprdata.h:259
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
Definition: vc1.h:381
static const int8_t mv[256][2]
Definition: 4xm.c:73
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:62
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
Definition: avcodec.h:1180
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
Definition: vc1dec.c:155
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
Definition: vc1dec.c:1842
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:614
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1284
static const int offset_table1[9]
Definition: vc1dec.c:50
NULL
Definition: eval.c:52
static int width
Definition: utils.c:156
int res_sprite
Simple/Main Profile sequence header.
Definition: vc1.h:190
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:48
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
Definition: vc1.h:246
op_pixels_func avg_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: dsputil.h:295
external API header
enum AVCodecID codec_id
Definition: avcodec.h:1350
int c_avail
Definition: vc1.h:269
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
Definition: avcodec.h:1008
const int8_t ff_vc1_adv_interlaced_8x8_zz[64]
Definition: vc1data.c:1047
static const uint8_t vc1_delta_run_table[AC_MODES][57]
Definition: vc1acdata.h:295
int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
Definition: vc1.c:1489
uint32_t * cbp_base
Definition: vc1.h:390
main external API structure.
Definition: avcodec.h:1339
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:326
uint8_t * is_intra
Definition: vc1.h:391
static int vc1_decode_p_mb_intfi(VC1Context *v)
Definition: vc1dec.c:3902
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:328
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:215
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
Definition: vc1dec.c:4176
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1365
int16_t(*[2] motion_val)[2]
motion vector table
Definition: avcodec.h:1172
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:67
Picture * picture
main picture buffer
Definition: mpegvideo.h:255
int extradata_size
Definition: avcodec.h:1455
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
Definition: vc1data.c:53
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:268
int sprite_width
Definition: vc1.h:380
int fmb_is_raw
forward mb plane is raw
Definition: vc1.h:297
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:38
uint8_t * is_intra_base
Definition: vc1.h:391
int coded_height
Definition: avcodec.h:1515
Definition: vc1.h:138
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:260
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1580
#define MB_INTRA_VLC_BITS
Definition: vc1dec.c:45
#define ER_MB_END
Definition: mpegvideo.h:503
void ff_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2391
int index
Definition: gxfenc.c:72
uint8_t * mv_f_last[2]
Definition: vc1.h:355
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
Definition: vc1dec.c:724
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:372
int context_initialized
Definition: mpegvideo.h:242
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:124
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value prediction dir: left=0, top=1.
Definition: vc1dec.c:2377
void ff_er_frame_start(MpegEncContext *s)
AVHWAccel * ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
Definition: utils.c:2046
#define mid_pred
Definition: mathops.h:94
int dim
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:361
int skip_is_raw
skip mb plane is not coded
Definition: vc1.h:298
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
Definition: intrax8.c:727
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:301
int ref_field_type[2]
forward and backward reference field type (top or bottom)
Definition: vc1.h:368
uint8_t * direct_mb_plane
bitplane for "direct" MBs
Definition: vc1.h:293
short DCTELEM
Definition: dsputil.h:39
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
Definition: vc1acdata.h:339
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:349
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
Definition: vc1dec.c:5343
DCTELEM(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:674
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
Definition: vc1.h:292
int numref
number of past field pictures used as reference
Definition: vc1.h:361
const int32_t ff_vc1_dqscale[63]
Definition: vc1data.c:1085
DCTELEM(* block)[6][64]
Definition: vc1.h:388
int blocks_off
Definition: vc1.h:369
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:61
static const uint16_t scale[4]
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: avcodec.h:997
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:325
int8_t * qscale_table
QP table.
Definition: avcodec.h:1139
uint8_t level
Definition: svq3.c:125
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:199
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:399
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:249
void(* clear_blocks)(DCTELEM *blocks)
Definition: dsputil.h:219
MpegEncContext s
Definition: vc1.h:182
int height
Definition: gxfenc.c:72
in the bitstream is reported as 10b
Definition: vc1.h:173
MpegEncContext.
Definition: mpegvideo.h:211
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:316
struct AVCodecContext * avctx
Definition: mpegvideo.h:213
VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstrea...
Definition: pixfmt.h:107
int cur_blk_idx
Definition: vc1.h:389
uint8_t pq
Definition: vc1.h:244
static const int offset_table2[9]
Definition: vc1dec.c:51
discard all non reference
Definition: avcodec.h:533
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
Definition: vc1dec.c:4332
void(* put_signed_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:204
int pqindex
raw pqindex used in coding set selection
Definition: vc1.h:268
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
Definition: vc1acdata.h:246
#define VC1_1REF_MVDATA_VLC_BITS
Definition: vc1data.c:138
Y , 8bpp.
Definition: pixfmt.h:73
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:248
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
Definition: vc1dec.c:1983
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:116
#define VC1_TTMB_VLC_BITS
Definition: vc1data.c:114
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:33
uint8_t * dest[3]
Definition: mpegvideo.h:435
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1772
static const int size_table[6]
Definition: vc1dec.c:3300
int output_width
Definition: vc1.h:380
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Definition: vc1.h:311
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra)
Predict and set motion vector for interlaced frame picture MBs.
Definition: vc1dec.c:1616
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:295
uint8_t dquantfrm
pquant parameters
Definition: vc1.h:251
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:315
Bi-dir predicted.
Definition: avutil.h:247
AVProfile.
Definition: avcodec.h:2948
int res_fasttx
reserved, always 1
Definition: vc1.h:194
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2893
int pic_header_flag
Definition: vc1.h:373
int * ttblk
Transform type at the block level.
Definition: vc1.h:265
VLC ff_vc1_ac_coeff_table[8]
Definition: vc1data.c:143
DSP utils.
uint8_t condover
Definition: vc1.h:328
void * priv_data
Definition: avcodec.h:1382
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Definition: vc1.c:525
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
Definition: vc1data.c:130
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
Definition: vc1.h:289
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
Definition: vc1dec.c:1362
int rnd
rounding control
Definition: vc1.h:301
VideoDSPContext vdsp
Definition: mpegvideo.h:362
Definition: vc1.h:141
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: vdpau.c:250
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1141
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
Print debugging info for the given picture.
Definition: mpegvideo.c:1735
int acpred_is_raw
Definition: vc1.h:325
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
Definition: vc1dsp.h:66
const int8_t ff_vc1_adv_interlaced_8x4_zz[32]
Definition: vc1data.c:1058
uint8_t rptfrm
Definition: vc1.h:314
static int decode012(GetBitContext *gb)
Definition: get_bits.h:529
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:65
int bmvtype
Definition: vc1.h:371
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:301
int key_frame
1 -> keyframe, 0-> not
Definition: avcodec.h:1058
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
Definition: vc1dec.c:779
int linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:253
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
Definition: vc1dec.c:4472
int overflg_is_raw
Definition: vc1.h:327
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
Definition: vc1.h:421
struct AVFrame f
Definition: mpegvideo.h:95
Definition: vc1.h:135
int level
Advanced Profile.
Definition: vc1.h:204
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:230
int brfd
reference frame distance (forward or backward)
Definition: vc1.h:372
VLC ff_msmp4_mb_i_vlc
Definition: msmpeg4data.c:36
uint8_t mv_mode2
Secondary MV coding mode (B frames)
Definition: vc1.h:240
int new_sprite
Frame decoding info for sprite modes.
Definition: vc1.h:377
uint8_t * mv_f_next[2]
Definition: vc1.h:356
int two_sprites
Definition: vc1.h:378
int codingset
index of current table set from 11.8 to use for luma block decoding
Definition: vc1.h:266
uint8_t * mb_type[3]
Definition: vc1.h:270
uint16_t * hrd_buffer
Definition: vc1.h:329
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3119
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1331
Definition: vf_drawbox.c:36
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3130
int uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:254
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
Definition: vc1data.c:132
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:898
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, DCTELEM *block)
Definition: vc1dsp.h:36
void(* vc1_v_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:43
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
Definition: vc1dsp.c:784
VLC * fourmvbp_vlc
Definition: vc1.h:347
void(* vc1_h_s_overlap)(DCTELEM *left, DCTELEM *right)
Definition: vc1dsp.h:46
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:158
int dc_table_index
Definition: mpegvideo.h:611
VLC ff_msmp4_dc_luma_vlc[2]
Definition: msmpeg4data.c:37
VLC ff_vc1_subblkpat_vlc[3]
Definition: vc1data.c:129
#define inc_blk_idx(idx)
uint8_t halfpq
Uniform quant over image and qp+.5.
Definition: vc1.h:279
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
Definition: vc1dec.c:2033
static const uint8_t vc1_delta_level_table[AC_MODES][31]
Definition: vc1acdata.h:203
#define t2
Definition: regdef.h:30
VC1DSPContext vc1dsp
Definition: vc1.h:184
Predicted.
Definition: avutil.h:246
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
Definition: vc1.h:407
int output_height
Definition: vc1.h:380
DSPContext.
Definition: dsputil.h:194
VLC ff_msmp4_dc_chroma_vlc[2]
Definition: msmpeg4data.c:38
op_pixels_func put_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:57
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
Definition: vc1dsp.h:65
if(!(ptr_align%ac->ptr_align)&&samples_align >=aligned_len)
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:52