vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "dsputil.h"
31 #include "avcodec.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "vc1.h"
35 #include "vc1data.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
38 #include "unary.h"
39 #include "simple_idct.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 
43 #undef NDEBUG
44 #include <assert.h>
45 
46 #define MB_INTRA_VLC_BITS 9
47 #define DC_VLC_BITS 9
48 
49 
// offset tables for interlaced picture MVDATA decoding
// table1 holds powers of two (0, 1, 2, 4, ..., 128); table2 holds the
// corresponding 2^k - 1 values (0, 1, 3, 7, ..., 255)
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
53 
54 /***********************************************************************/
65 enum Imode {
73 }; //imode defines
75 
76  //Bitplane group
78 
80 {
81  MpegEncContext *s = &v->s;
82  int topleft_mb_pos, top_mb_pos;
83  int stride_y, fieldtx;
84  int v_dist;
85 
86  /* The put pixels loop is always one MB row behind the decoding loop,
87  * because we can only put pixels when overlap filtering is done, and
88  * for filtering of the bottom edge of a MB, we need the next MB row
89  * present as well.
90  * Within the row, the put pixels loop is also one MB col behind the
91  * decoding loop. The reason for this is again, because for filtering
92  * of the right MB edge, we need the next MB present. */
93  if (!s->first_slice_line) {
94  if (s->mb_x) {
95  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
96  fieldtx = v->fieldtx_plane[topleft_mb_pos];
97  stride_y = s->linesize << fieldtx;
98  v_dist = (16 - fieldtx) >> (fieldtx == 0);
100  s->dest[0] - 16 * s->linesize - 16,
101  stride_y);
103  s->dest[0] - 16 * s->linesize - 8,
104  stride_y);
106  s->dest[0] - v_dist * s->linesize - 16,
107  stride_y);
109  s->dest[0] - v_dist * s->linesize - 8,
110  stride_y);
112  s->dest[1] - 8 * s->uvlinesize - 8,
113  s->uvlinesize);
115  s->dest[2] - 8 * s->uvlinesize - 8,
116  s->uvlinesize);
117  }
118  if (s->mb_x == s->mb_width - 1) {
119  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
120  fieldtx = v->fieldtx_plane[top_mb_pos];
121  stride_y = s->linesize << fieldtx;
122  v_dist = fieldtx ? 15 : 8;
124  s->dest[0] - 16 * s->linesize,
125  stride_y);
127  s->dest[0] - 16 * s->linesize + 8,
128  stride_y);
130  s->dest[0] - v_dist * s->linesize,
131  stride_y);
133  s->dest[0] - v_dist * s->linesize + 8,
134  stride_y);
136  s->dest[1] - 8 * s->uvlinesize,
137  s->uvlinesize);
139  s->dest[2] - 8 * s->uvlinesize,
140  s->uvlinesize);
141  }
142  }
143 
144 #define inc_blk_idx(idx) do { \
145  idx++; \
146  if (idx >= v->n_allocated_blks) \
147  idx = 0; \
148  } while (0)
149 
154 }
155 
/**
 * In-loop deblocking filter for one macroblock.
 *
 * Runs the vertical edge filters on the current MB and the horizontal
 * edge filters one MB row behind (the "- 16 * s->linesize" addresses),
 * because a horizontal edge can only be filtered once the row below it
 * has been reconstructed. The call order below follows the normative
 * filtering order and must not be rearranged.
 *
 * @param v  VC-1 decoder context
 * @param pq picture quantizer, passed through to the dsp filter routines
 */
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;
    if (!s->first_slice_line) {
        /* top luma edge of the current MB (against the row above) */
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
        if (s->mb_x)
            /* left luma edge of the MB above */
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
        /* internal vertical luma edge of the MB above */
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            /* chroma planes: dest[1] = U, dest[2] = V */
            v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
            if (s->mb_x)
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
        }
    }
    /* internal horizontal luma edge of the current MB */
    v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);

    /* last MB row of the slice: the deferred horizontal filtering of this
     * row has to happen now, as no further row will trigger it */
    if (s->mb_y == s->end_mb_y - 1) {
        if (s->mb_x) {
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
        }
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
    }
}
182 
184 {
185  MpegEncContext *s = &v->s;
186  int j;
187 
188  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
189  * means it runs two rows/cols behind the decoding loop. */
190  if (!s->first_slice_line) {
191  if (s->mb_x) {
192  if (s->mb_y >= s->start_mb_y + 2) {
193  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
194 
195  if (s->mb_x >= 2)
196  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
197  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
198  for (j = 0; j < 2; j++) {
199  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
200  if (s->mb_x >= 2) {
201  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
202  }
203  }
204  }
205  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
206  }
207 
208  if (s->mb_x == s->mb_width - 1) {
209  if (s->mb_y >= s->start_mb_y + 2) {
210  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
211 
212  if (s->mb_x)
213  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
214  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
215  for (j = 0; j < 2; j++) {
216  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
217  if (s->mb_x >= 2) {
218  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
219  }
220  }
221  }
222  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
223  }
224 
225  if (s->mb_y == s->end_mb_y) {
226  if (s->mb_x) {
227  if (s->mb_x >= 2)
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
229  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
230  if (s->mb_x >= 2) {
231  for (j = 0; j < 2; j++) {
232  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
233  }
234  }
235  }
236 
237  if (s->mb_x == s->mb_width - 1) {
238  if (s->mb_x)
239  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
240  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
241  if (s->mb_x) {
242  for (j = 0; j < 2; j++) {
243  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
244  }
245  }
246  }
247  }
248  }
249 }
250 
252 {
253  MpegEncContext *s = &v->s;
254  int mb_pos;
255 
256  if (v->condover == CONDOVER_NONE)
257  return;
258 
259  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
260 
261  /* Within a MB, the horizontal overlap always runs before the vertical.
262  * To accomplish that, we run the H on left and internal borders of the
263  * currently decoded MB. Then, we wait for the next overlap iteration
264  * to do H overlap on the right edge of this MB, before moving over and
265  * running the V overlap. Therefore, the V overlap makes us trail by one
266  * MB col and the H overlap filter makes us trail by one MB row. This
267  * is reflected in the time at which we run the put_pixels loop. */
268  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
269  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
270  v->over_flags_plane[mb_pos - 1])) {
272  v->block[v->cur_blk_idx][0]);
274  v->block[v->cur_blk_idx][2]);
275  if (!(s->flags & CODEC_FLAG_GRAY)) {
277  v->block[v->cur_blk_idx][4]);
279  v->block[v->cur_blk_idx][5]);
280  }
281  }
283  v->block[v->cur_blk_idx][1]);
285  v->block[v->cur_blk_idx][3]);
286 
287  if (s->mb_x == s->mb_width - 1) {
288  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
289  v->over_flags_plane[mb_pos - s->mb_stride])) {
291  v->block[v->cur_blk_idx][0]);
293  v->block[v->cur_blk_idx][1]);
294  if (!(s->flags & CODEC_FLAG_GRAY)) {
296  v->block[v->cur_blk_idx][4]);
298  v->block[v->cur_blk_idx][5]);
299  }
300  }
302  v->block[v->cur_blk_idx][2]);
304  v->block[v->cur_blk_idx][3]);
305  }
306  }
307  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
308  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
309  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
311  v->block[v->left_blk_idx][0]);
313  v->block[v->left_blk_idx][1]);
314  if (!(s->flags & CODEC_FLAG_GRAY)) {
316  v->block[v->left_blk_idx][4]);
318  v->block[v->left_blk_idx][5]);
319  }
320  }
322  v->block[v->left_blk_idx][2]);
324  v->block[v->left_blk_idx][3]);
325  }
326 }
327 
331 static void vc1_mc_1mv(VC1Context *v, int dir)
332 {
333  MpegEncContext *s = &v->s;
334  DSPContext *dsp = &v->s.dsp;
335  uint8_t *srcY, *srcU, *srcV;
336  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
337  int off, off_uv;
338  int v_edge_pos = s->v_edge_pos >> v->field_mode;
339 
340  if ((!v->field_mode ||
341  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
342  !v->s.last_picture.f.data[0])
343  return;
344 
345  mx = s->mv[dir][0][0];
346  my = s->mv[dir][0][1];
347 
348  // store motion vectors for further use in B frames
349  if (s->pict_type == AV_PICTURE_TYPE_P) {
350  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
351  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
352  }
353 
354  uvmx = (mx + ((mx & 3) == 3)) >> 1;
355  uvmy = (my + ((my & 3) == 3)) >> 1;
356  v->luma_mv[s->mb_x][0] = uvmx;
357  v->luma_mv[s->mb_x][1] = uvmy;
358 
359  if (v->field_mode &&
360  v->cur_field_type != v->ref_field_type[dir]) {
361  my = my - 2 + 4 * v->cur_field_type;
362  uvmy = uvmy - 2 + 4 * v->cur_field_type;
363  }
364 
365  // fastuvmc shall be ignored for interlaced frame picture
366  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
367  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
368  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
369  }
370  if (v->field_mode) { // interlaced field picture
371  if (!dir) {
372  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
373  srcY = s->current_picture.f.data[0];
374  srcU = s->current_picture.f.data[1];
375  srcV = s->current_picture.f.data[2];
376  } else {
377  srcY = s->last_picture.f.data[0];
378  srcU = s->last_picture.f.data[1];
379  srcV = s->last_picture.f.data[2];
380  }
381  } else {
382  srcY = s->next_picture.f.data[0];
383  srcU = s->next_picture.f.data[1];
384  srcV = s->next_picture.f.data[2];
385  }
386  } else {
387  if (!dir) {
388  srcY = s->last_picture.f.data[0];
389  srcU = s->last_picture.f.data[1];
390  srcV = s->last_picture.f.data[2];
391  } else {
392  srcY = s->next_picture.f.data[0];
393  srcU = s->next_picture.f.data[1];
394  srcV = s->next_picture.f.data[2];
395  }
396  }
397 
398  if (!srcY || !srcU) {
399  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
400  return;
401  }
402 
403  src_x = s->mb_x * 16 + (mx >> 2);
404  src_y = s->mb_y * 16 + (my >> 2);
405  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
406  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
407 
408  if (v->profile != PROFILE_ADVANCED) {
409  src_x = av_clip( src_x, -16, s->mb_width * 16);
410  src_y = av_clip( src_y, -16, s->mb_height * 16);
411  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
412  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
413  } else {
414  src_x = av_clip( src_x, -17, s->avctx->coded_width);
415  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
416  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
417  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
418  }
419 
420  srcY += src_y * s->linesize + src_x;
421  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
422  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
423 
424  if (v->field_mode && v->ref_field_type[dir]) {
425  srcY += s->current_picture_ptr->f.linesize[0];
426  srcU += s->current_picture_ptr->f.linesize[1];
427  srcV += s->current_picture_ptr->f.linesize[2];
428  }
429 
430  /* for grayscale we should not try to read from unknown area */
431  if (s->flags & CODEC_FLAG_GRAY) {
432  srcU = s->edge_emu_buffer + 18 * s->linesize;
433  srcV = s->edge_emu_buffer + 18 * s->linesize;
434  }
435 
437  || s->h_edge_pos < 22 || v_edge_pos < 22
438  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
439  || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
440  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
441 
442  srcY -= s->mspel * (1 + s->linesize);
444  17 + s->mspel * 2, 17 + s->mspel * 2,
445  src_x - s->mspel, src_y - s->mspel,
446  s->h_edge_pos, v_edge_pos);
447  srcY = s->edge_emu_buffer;
448  s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
449  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
450  s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
451  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
452  srcU = uvbuf;
453  srcV = uvbuf + 16;
454  /* if we deal with range reduction we need to scale source blocks */
455  if (v->rangeredfrm) {
456  int i, j;
457  uint8_t *src, *src2;
458 
459  src = srcY;
460  for (j = 0; j < 17 + s->mspel * 2; j++) {
461  for (i = 0; i < 17 + s->mspel * 2; i++)
462  src[i] = ((src[i] - 128) >> 1) + 128;
463  src += s->linesize;
464  }
465  src = srcU;
466  src2 = srcV;
467  for (j = 0; j < 9; j++) {
468  for (i = 0; i < 9; i++) {
469  src[i] = ((src[i] - 128) >> 1) + 128;
470  src2[i] = ((src2[i] - 128) >> 1) + 128;
471  }
472  src += s->uvlinesize;
473  src2 += s->uvlinesize;
474  }
475  }
476  /* if we deal with intensity compensation we need to scale source blocks */
477  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
478  int i, j;
479  uint8_t *src, *src2;
480 
481  src = srcY;
482  for (j = 0; j < 17 + s->mspel * 2; j++) {
483  for (i = 0; i < 17 + s->mspel * 2; i++)
484  src[i] = v->luty[src[i]];
485  src += s->linesize;
486  }
487  src = srcU;
488  src2 = srcV;
489  for (j = 0; j < 9; j++) {
490  for (i = 0; i < 9; i++) {
491  src[i] = v->lutuv[src[i]];
492  src2[i] = v->lutuv[src2[i]];
493  }
494  src += s->uvlinesize;
495  src2 += s->uvlinesize;
496  }
497  }
498  srcY += s->mspel * (1 + s->linesize);
499  }
500 
501  if (v->field_mode && v->cur_field_type) {
502  off = s->current_picture_ptr->f.linesize[0];
503  off_uv = s->current_picture_ptr->f.linesize[1];
504  } else {
505  off = 0;
506  off_uv = 0;
507  }
508  if (s->mspel) {
509  dxy = ((my & 3) << 2) | (mx & 3);
510  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
511  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
512  srcY += s->linesize * 8;
513  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
514  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
515  } else { // hpel mc - always used for luma
516  dxy = (my & 2) | ((mx & 2) >> 1);
517  if (!v->rnd)
518  dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
519  else
520  dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
521  }
522 
523  if (s->flags & CODEC_FLAG_GRAY) return;
524  /* Chroma MC always uses qpel bilinear */
525  uvmx = (uvmx & 3) << 1;
526  uvmy = (uvmy & 3) << 1;
527  if (!v->rnd) {
528  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
529  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
530  } else {
531  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
532  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
533  }
534 }
535 
/**
 * Median of four values: the mean of the two middle elements
 * (integer division, truncating toward zero).
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo1, hi1, lo2, hi2, mid_lo, mid_hi;

    /* sort each pair */
    if (a < b) { lo1 = a; hi1 = b; } else { lo1 = b; hi1 = a; }
    if (c < d) { lo2 = c; hi2 = d; } else { lo2 = d; hi2 = c; }

    /* the two middle values are the larger low and the smaller high */
    mid_lo = lo1 > lo2 ? lo1 : lo2;
    mid_hi = hi1 < hi2 ? hi1 : hi2;

    return (mid_lo + mid_hi) / 2;
}
546 
549 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
550 {
551  MpegEncContext *s = &v->s;
552  DSPContext *dsp = &v->s.dsp;
553  uint8_t *srcY;
554  int dxy, mx, my, src_x, src_y;
555  int off;
556  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
557  int v_edge_pos = s->v_edge_pos >> v->field_mode;
558 
559  if ((!v->field_mode ||
560  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
561  !v->s.last_picture.f.data[0])
562  return;
563 
564  mx = s->mv[dir][n][0];
565  my = s->mv[dir][n][1];
566 
567  if (!dir) {
568  if (v->field_mode) {
569  if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
570  srcY = s->current_picture.f.data[0];
571  else
572  srcY = s->last_picture.f.data[0];
573  } else
574  srcY = s->last_picture.f.data[0];
575  } else
576  srcY = s->next_picture.f.data[0];
577 
578  if (!srcY) {
579  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
580  return;
581  }
582 
583  if (v->field_mode) {
584  if (v->cur_field_type != v->ref_field_type[dir])
585  my = my - 2 + 4 * v->cur_field_type;
586  }
587 
588  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
589  int same_count = 0, opp_count = 0, k;
590  int chosen_mv[2][4][2], f;
591  int tx, ty;
592  for (k = 0; k < 4; k++) {
593  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
594  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
595  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
596  opp_count += f;
597  same_count += 1 - f;
598  }
599  f = opp_count > same_count;
600  switch (f ? opp_count : same_count) {
601  case 4:
602  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
603  chosen_mv[f][2][0], chosen_mv[f][3][0]);
604  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
605  chosen_mv[f][2][1], chosen_mv[f][3][1]);
606  break;
607  case 3:
608  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
609  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
610  break;
611  case 2:
612  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
613  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
614  break;
615  }
616  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
617  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
618  for (k = 0; k < 4; k++)
619  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
620  }
621 
622  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
623  int qx, qy;
624  int width = s->avctx->coded_width;
625  int height = s->avctx->coded_height >> 1;
626  qx = (s->mb_x * 16) + (mx >> 2);
627  qy = (s->mb_y * 8) + (my >> 3);
628 
629  if (qx < -17)
630  mx -= 4 * (qx + 17);
631  else if (qx > width)
632  mx -= 4 * (qx - width);
633  if (qy < -18)
634  my -= 8 * (qy + 18);
635  else if (qy > height + 1)
636  my -= 8 * (qy - height - 1);
637  }
638 
639  if ((v->fcm == ILACE_FRAME) && fieldmv)
640  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
641  else
642  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
643  if (v->field_mode && v->cur_field_type)
644  off += s->current_picture_ptr->f.linesize[0];
645 
646  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
647  if (!fieldmv)
648  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
649  else
650  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
651 
652  if (v->profile != PROFILE_ADVANCED) {
653  src_x = av_clip(src_x, -16, s->mb_width * 16);
654  src_y = av_clip(src_y, -16, s->mb_height * 16);
655  } else {
656  src_x = av_clip(src_x, -17, s->avctx->coded_width);
657  if (v->fcm == ILACE_FRAME) {
658  if (src_y & 1)
659  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
660  else
661  src_y = av_clip(src_y, -18, s->avctx->coded_height);
662  } else {
663  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
664  }
665  }
666 
667  srcY += src_y * s->linesize + src_x;
668  if (v->field_mode && v->ref_field_type[dir])
669  srcY += s->current_picture_ptr->f.linesize[0];
670 
671  if (fieldmv && !(src_y & 1))
672  v_edge_pos--;
673  if (fieldmv && (src_y & 1) && src_y < 4)
674  src_y--;
676  || s->h_edge_pos < 13 || v_edge_pos < 23
677  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
678  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
679  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
680  /* check emulate edge stride and offset */
682  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
683  src_x - s->mspel, src_y - (s->mspel << fieldmv),
684  s->h_edge_pos, v_edge_pos);
685  srcY = s->edge_emu_buffer;
686  /* if we deal with range reduction we need to scale source blocks */
687  if (v->rangeredfrm) {
688  int i, j;
689  uint8_t *src;
690 
691  src = srcY;
692  for (j = 0; j < 9 + s->mspel * 2; j++) {
693  for (i = 0; i < 9 + s->mspel * 2; i++)
694  src[i] = ((src[i] - 128) >> 1) + 128;
695  src += s->linesize << fieldmv;
696  }
697  }
698  /* if we deal with intensity compensation we need to scale source blocks */
699  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
700  int i, j;
701  uint8_t *src;
702 
703  src = srcY;
704  for (j = 0; j < 9 + s->mspel * 2; j++) {
705  for (i = 0; i < 9 + s->mspel * 2; i++)
706  src[i] = v->luty[src[i]];
707  src += s->linesize << fieldmv;
708  }
709  }
710  srcY += s->mspel * (1 + (s->linesize << fieldmv));
711  }
712 
713  if (s->mspel) {
714  dxy = ((my & 3) << 2) | (mx & 3);
715  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
716  } else { // hpel mc - always used for luma
717  dxy = (my & 2) | ((mx & 2) >> 1);
718  if (!v->rnd)
719  dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
720  else
721  dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
722  }
723 }
724 
725 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
726 {
727  int idx, i;
728  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
729 
730  idx = ((a[3] != flag) << 3)
731  | ((a[2] != flag) << 2)
732  | ((a[1] != flag) << 1)
733  | (a[0] != flag);
734  if (!idx) {
735  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
736  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
737  return 4;
738  } else if (count[idx] == 1) {
739  switch (idx) {
740  case 0x1:
741  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
742  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
743  return 3;
744  case 0x2:
745  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
746  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
747  return 3;
748  case 0x4:
749  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
750  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
751  return 3;
752  case 0x8:
753  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
754  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
755  return 3;
756  }
757  } else if (count[idx] == 2) {
758  int t1 = 0, t2 = 0;
759  for (i = 0; i < 3; i++)
760  if (!a[i]) {
761  t1 = i;
762  break;
763  }
764  for (i = t1 + 1; i < 4; i++)
765  if (!a[i]) {
766  t2 = i;
767  break;
768  }
769  *tx = (mvx[t1] + mvx[t2]) / 2;
770  *ty = (mvy[t1] + mvy[t2]) / 2;
771  return 2;
772  } else {
773  return 0;
774  }
775  return -1;
776 }
777 
780 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
781 {
782  MpegEncContext *s = &v->s;
783  DSPContext *dsp = &v->s.dsp;
784  uint8_t *srcU, *srcV;
785  int uvmx, uvmy, uvsrc_x, uvsrc_y;
786  int k, tx = 0, ty = 0;
787  int mvx[4], mvy[4], intra[4], mv_f[4];
788  int valid_count;
789  int chroma_ref_type = v->cur_field_type, off = 0;
790  int v_edge_pos = s->v_edge_pos >> v->field_mode;
791 
792  if (!v->field_mode && !v->s.last_picture.f.data[0])
793  return;
794  if (s->flags & CODEC_FLAG_GRAY)
795  return;
796 
797  for (k = 0; k < 4; k++) {
798  mvx[k] = s->mv[dir][k][0];
799  mvy[k] = s->mv[dir][k][1];
800  intra[k] = v->mb_type[0][s->block_index[k]];
801  if (v->field_mode)
802  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
803  }
804 
805  /* calculate chroma MV vector from four luma MVs */
806  if (!v->field_mode || (v->field_mode && !v->numref)) {
807  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
808  if (!valid_count) {
809  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
810  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
811  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
812  return; //no need to do MC for intra blocks
813  }
814  } else {
815  int dominant = 0;
816  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
817  dominant = 1;
818  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
819  if (dominant)
820  chroma_ref_type = !v->cur_field_type;
821  }
822  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
823  return;
824  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
825  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
826  uvmx = (tx + ((tx & 3) == 3)) >> 1;
827  uvmy = (ty + ((ty & 3) == 3)) >> 1;
828 
829  v->luma_mv[s->mb_x][0] = uvmx;
830  v->luma_mv[s->mb_x][1] = uvmy;
831 
832  if (v->fastuvmc) {
833  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
834  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
835  }
836  // Field conversion bias
837  if (v->cur_field_type != chroma_ref_type)
838  uvmy += 2 - 4 * chroma_ref_type;
839 
840  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
841  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
842 
843  if (v->profile != PROFILE_ADVANCED) {
844  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
845  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
846  } else {
847  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
848  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
849  }
850 
851  if (!dir) {
852  if (v->field_mode) {
853  if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
854  srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
855  srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
856  } else {
857  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
858  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
859  }
860  } else {
861  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
862  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
863  }
864  } else {
865  srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
866  srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
867  }
868 
869  if (!srcU) {
870  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
871  return;
872  }
873 
874  if (v->field_mode) {
875  if (chroma_ref_type) {
876  srcU += s->current_picture_ptr->f.linesize[1];
877  srcV += s->current_picture_ptr->f.linesize[2];
878  }
879  off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
880  }
881 
883  || s->h_edge_pos < 18 || v_edge_pos < 18
884  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
885  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
887  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
888  s->h_edge_pos >> 1, v_edge_pos >> 1);
889  s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
890  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
891  s->h_edge_pos >> 1, v_edge_pos >> 1);
892  srcU = s->edge_emu_buffer;
893  srcV = s->edge_emu_buffer + 16;
894 
895  /* if we deal with range reduction we need to scale source blocks */
896  if (v->rangeredfrm) {
897  int i, j;
898  uint8_t *src, *src2;
899 
900  src = srcU;
901  src2 = srcV;
902  for (j = 0; j < 9; j++) {
903  for (i = 0; i < 9; i++) {
904  src[i] = ((src[i] - 128) >> 1) + 128;
905  src2[i] = ((src2[i] - 128) >> 1) + 128;
906  }
907  src += s->uvlinesize;
908  src2 += s->uvlinesize;
909  }
910  }
911  /* if we deal with intensity compensation we need to scale source blocks */
912  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
913  int i, j;
914  uint8_t *src, *src2;
915 
916  src = srcU;
917  src2 = srcV;
918  for (j = 0; j < 9; j++) {
919  for (i = 0; i < 9; i++) {
920  src[i] = v->lutuv[src[i]];
921  src2[i] = v->lutuv[src2[i]];
922  }
923  src += s->uvlinesize;
924  src2 += s->uvlinesize;
925  }
926  }
927  }
928 
929  /* Chroma MC always uses qpel bilinear */
930  uvmx = (uvmx & 3) << 1;
931  uvmy = (uvmy & 3) << 1;
932  if (!v->rnd) {
933  dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
934  dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
935  } else {
936  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
937  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
938  }
939 }
940 
944 {
945  MpegEncContext *s = &v->s;
946  DSPContext *dsp = &v->s.dsp;
947  uint8_t *srcU, *srcV;
948  int uvsrc_x, uvsrc_y;
949  int uvmx_field[4], uvmy_field[4];
950  int i, off, tx, ty;
951  int fieldmv = v->blk_mv_type[s->block_index[0]];
952  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
953  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
954  int v_edge_pos = s->v_edge_pos >> 1;
955 
956  if (!v->s.last_picture.f.data[0])
957  return;
958  if (s->flags & CODEC_FLAG_GRAY)
959  return;
960 
961  for (i = 0; i < 4; i++) {
962  tx = s->mv[0][i][0];
963  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
964  ty = s->mv[0][i][1];
965  if (fieldmv)
966  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
967  else
968  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
969  }
970 
971  for (i = 0; i < 4; i++) {
972  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
973  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
974  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
975  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
976  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
977  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
978  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
979  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
980  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
981  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
982 
983  if (fieldmv && !(uvsrc_y & 1))
984  v_edge_pos--;
985  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
986  uvsrc_y--;
987  if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
988  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
989  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
990  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
992  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
993  s->h_edge_pos >> 1, v_edge_pos);
994  s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
995  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
996  s->h_edge_pos >> 1, v_edge_pos);
997  srcU = s->edge_emu_buffer;
998  srcV = s->edge_emu_buffer + 16;
999 
1000  /* if we deal with intensity compensation we need to scale source blocks */
1001  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1002  int i, j;
1003  uint8_t *src, *src2;
1004 
1005  src = srcU;
1006  src2 = srcV;
1007  for (j = 0; j < 5; j++) {
1008  for (i = 0; i < 5; i++) {
1009  src[i] = v->lutuv[src[i]];
1010  src2[i] = v->lutuv[src2[i]];
1011  }
1012  src += s->uvlinesize << 1;
1013  src2 += s->uvlinesize << 1;
1014  }
1015  }
1016  }
1017  if (!v->rnd) {
1018  dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1019  dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1020  } else {
1021  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1022  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1023  }
1024  }
1025 }
1026 
1027 /***********************************************************************/
/**
 * Get macroblock-level quantizer scale (MQUANT).
 * Active only when per-MB quantizer switching (DQUANT) is enabled for
 * the frame (v->dquantfrm); leaves the result in the local 'mquant'.
 * For DQPROFILE_ALL_MBS the value is read from the bitstream (either a
 * bilevel pq/altpq switch or a 3-bit differential with a 5-bit escape);
 * for the edge profiles, MBs on the selected picture edges get altpq.
 * Expects locals: mquant, mqdiff, gb; uses s->mb_x/mb_y for edge tests.
 */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
    }
1067 
/**
 * Decode a motion-vector differential (MVDATA syntax element).
 * Fills _dmv_x/_dmv_y and sets s->mb_intra plus the local mb_has_coeffs.
 * After VLC decode: index 0 means a zero differential, 35 an escape
 * (components coded raw with k_x/k_y dependent widths), 36 an intra MB;
 * any other index is split (mod/div 6) into horizontal and vertical
 * components decoded via the local size_table/offset_table.
 * Expects locals: index, index1, val, sign, mb_has_coeffs, gb.
 */
#define GET_MVDATA(_dmv_x, _dmv_y)                                        \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                        \
    if (index > 36) {                                                     \
        mb_has_coeffs = 1;                                                \
        index -= 37;                                                      \
    } else                                                                \
        mb_has_coeffs = 0;                                                \
    s->mb_intra = 0;                                                      \
    if (!index) {                                                         \
        _dmv_x = _dmv_y = 0;                                              \
    } else if (index == 35) {                                             \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);            \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);            \
    } else if (index == 36) {                                             \
        _dmv_x = 0;                                                       \
        _dmv_y = 0;                                                       \
        s->mb_intra = 1;                                                  \
    } else {                                                              \
        index1 = index % 6;                                               \
        if (!s->quarter_sample && index1 == 5) val = 1;                   \
        else val = 0;                                                     \
        if (size_table[index1] - val > 0)                                 \
            val = get_bits(gb, size_table[index1] - val);                 \
        else val = 0;                                                     \
        sign = 0 - (val&1);                                               \
        _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign;       \
                                                                          \
        index1 = index / 6;                                               \
        if (!s->quarter_sample && index1 == 5) val = 1;                   \
        else val = 0;                                                     \
        if (size_table[index1] - val > 0)                                 \
            val = get_bits(gb, size_table[index1] - val);                 \
        else val = 0;                                                     \
        sign = 0 - (val & 1);                                             \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;     \
    }
1112 
                                       int *dmv_y, int *pred_flag)
{
    /* NOTE(review): the opening line of this signature (return type,
     * function name and the leading parameters) is missing from this
     * copy of the file -- apparently lost during extraction; restore it
     * from upstream before building. */
    /* Decode one interlaced-field MV differential: select the 1-ref or
     * 2-ref MVDATA VLC and its escape index from v->numref, apply the
     * DMVRANGE extension to x and/or y, and write the differentials to
     * *dmv_x / *dmv_y.  With two reference fields (v->numref),
     * *pred_flag receives the reference-field selector. */
    int index, index1;
    int extend_x = 0, extend_y = 0;
    GetBitContext *gb = &v->s.gb;
    int bits, esc;
    int val, sign;
    const int* offs_tab;

    if (v->numref) {
        /* two reference fields: larger VLC and different escape code */
        bits = VC1_2REF_MVDATA_VLC_BITS;
        esc = 125;
    } else {
        bits = VC1_1REF_MVDATA_VLC_BITS;
        esc = 71;
    }
    switch (v->dmvrange) { /* DMVRANGE: extended differential MV range */
    case 1:
        extend_x = 1;
        break;
    case 2:
        extend_y = 1;
        break;
    case 3:
        extend_x = extend_y = 1;
        break;
    }
    index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
    if (index == esc) {
        /* escape: both components coded raw with k_x/k_y bits */
        *dmv_x = get_bits(gb, v->k_x);
        *dmv_y = get_bits(gb, v->k_y);
        if (v->numref) {
            /* LSB of the raw y value selects the reference field */
            *pred_flag = *dmv_y & 1;
            *dmv_y = (*dmv_y + *pred_flag) >> 1;
        }
    }
    else {
        if (extend_x)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) % 9;     /* horizontal component index */
        if (index1 != 0) {
            val = get_bits(gb, index1 + extend_x);
            sign = 0 -(val & 1);      /* 0 or -1: sign-fold the value */
            *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
        } else
            *dmv_x = 0;
        if (extend_y)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) / 9;     /* vertical component index */
        if (index1 > v->numref) {
            val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
            sign = 0 - (val & 1);
            *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
        } else
            *dmv_y = 0;
        if (v->numref)
            *pred_flag = index1 & 1;
    }
}
1177 
1178 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1179 {
1180  int scaledvalue, refdist;
1181  int scalesame1, scalesame2;
1182  int scalezone1_x, zone1offset_x;
1183  int table_index = dir ^ v->second_field;
1184 
1185  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1186  refdist = v->refdist;
1187  else
1188  refdist = dir ? v->brfd : v->frfd;
1189  if (refdist > 3)
1190  refdist = 3;
1191  scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1192  scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1193  scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1194  zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
1195 
1196  if (FFABS(n) > 255)
1197  scaledvalue = n;
1198  else {
1199  if (FFABS(n) < scalezone1_x)
1200  scaledvalue = (n * scalesame1) >> 8;
1201  else {
1202  if (n < 0)
1203  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1204  else
1205  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1206  }
1207  }
1208  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1209 }
1210 
1211 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1212 {
1213  int scaledvalue, refdist;
1214  int scalesame1, scalesame2;
1215  int scalezone1_y, zone1offset_y;
1216  int table_index = dir ^ v->second_field;
1217 
1218  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1219  refdist = v->refdist;
1220  else
1221  refdist = dir ? v->brfd : v->frfd;
1222  if (refdist > 3)
1223  refdist = 3;
1224  scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1225  scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1226  scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1227  zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1228 
1229  if (FFABS(n) > 63)
1230  scaledvalue = n;
1231  else {
1232  if (FFABS(n) < scalezone1_y)
1233  scaledvalue = (n * scalesame1) >> 8;
1234  else {
1235  if (n < 0)
1236  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1237  else
1238  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1239  }
1240  }
1241 
1242  if (v->cur_field_type && !v->ref_field_type[dir])
1243  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1244  else
1245  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1246 }
1247 
1248 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1249 {
1250  int scalezone1_x, zone1offset_x;
1251  int scaleopp1, scaleopp2, brfd;
1252  int scaledvalue;
1253 
1254  brfd = FFMIN(v->brfd, 3);
1255  scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1256  zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1257  scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1258  scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1259 
1260  if (FFABS(n) > 255)
1261  scaledvalue = n;
1262  else {
1263  if (FFABS(n) < scalezone1_x)
1264  scaledvalue = (n * scaleopp1) >> 8;
1265  else {
1266  if (n < 0)
1267  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1268  else
1269  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1270  }
1271  }
1272  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1273 }
1274 
1275 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1276 {
1277  int scalezone1_y, zone1offset_y;
1278  int scaleopp1, scaleopp2, brfd;
1279  int scaledvalue;
1280 
1281  brfd = FFMIN(v->brfd, 3);
1282  scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1283  zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1284  scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1285  scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1286 
1287  if (FFABS(n) > 63)
1288  scaledvalue = n;
1289  else {
1290  if (FFABS(n) < scalezone1_y)
1291  scaledvalue = (n * scaleopp1) >> 8;
1292  else {
1293  if (n < 0)
1294  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1295  else
1296  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1297  }
1298  }
1299  if (v->cur_field_type && !v->ref_field_type[dir]) {
1300  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1301  } else {
1302  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1303  }
1304 }
1305 
1306 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1307  int dim, int dir)
1308 {
1309  int brfd, scalesame;
1310  int hpel = 1 - v->s.quarter_sample;
1311 
1312  n >>= hpel;
1313  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1314  if (dim)
1315  n = scaleforsame_y(v, i, n, dir) << hpel;
1316  else
1317  n = scaleforsame_x(v, n, dir) << hpel;
1318  return n;
1319  }
1320  brfd = FFMIN(v->brfd, 3);
1321  scalesame = vc1_b_field_mvpred_scales[0][brfd];
1322 
1323  n = (n * scalesame >> 8) << hpel;
1324  return n;
1325 }
1326 
1327 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1328  int dim, int dir)
1329 {
1330  int refdist, scaleopp;
1331  int hpel = 1 - v->s.quarter_sample;
1332 
1333  n >>= hpel;
1334  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1335  if (dim)
1336  n = scaleforopp_y(v, n, dir) << hpel;
1337  else
1338  n = scaleforopp_x(v, n) << hpel;
1339  return n;
1340  }
1341  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1342  refdist = FFMIN(v->refdist, 3);
1343  else
1344  refdist = dir ? v->brfd : v->frfd;
1345  scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1346 
1347  n = (n * scaleopp >> 8) << hpel;
1348  return n;
1349 }
1350 
/** Predict and set motion vector
 *
 * Computes the MV predictor for block n from the spatial neighbours
 * A (above), B (above-right, or above-left at the right edge) and
 * C (left), applies field-mode same/opposite-field scaling, MV
 * pullback and (when coded) the hybrid-prediction override, then
 * stores predictor + differential wrapped into the range r_x/r_y.
 * mv1 != 0 duplicates the result over all four blocks of the MB;
 * dir selects forward/backward; pred_flag is the decoded reference
 * field hint for field pictures.
 */
static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                               int mv1, int r_x, int r_y, uint8_t* is_intra,
                               int pred_flag, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
    int opposit, a_f, b_f, c_f;
    int16_t field_predA[2];
    int16_t field_predB[2];
    int16_t field_predC[2];
    int a_valid, b_valid, c_valid;
    int hybridmv_thresh, y_bias = 0;

    /* NOTE(review): the second operand of this '||' appears to have
     * been lost from this copy of the file (line truncated during
     * extraction) -- restore it from upstream before building. */
    if (v->mv_mode == MV_PMODE_MIXED_MV ||
        mixedmv_pic = 1;
    else
        mixedmv_pic = 0;
    /* scale MV difference to be quad-pel */
    dmv_x <<= 1 - s->quarter_sample;
    dmv_y <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra MB: zero out both direction's MVs (and all four blocks
         * in 1-MV mode) and return -- no prediction is needed */
        s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
        s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
        if (mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
            /* NOTE(review): the next line lacks '+ v->blocks_off',
             * unlike every sibling store here -- looks like an
             * oversight; confirm against upstream. */
            s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
        }
        return;
    }

    /* fetch neighbour MVs: C = left, A = above */
    C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
    A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
    if (mv1) {
        if (v->field_mode && mixedmv_pic)
            off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        else
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
    } else {
        //in 4-MV mode different blocks have different B predictor position
        switch (n) {
        case 0:
            off = (s->mb_x > 0) ? -1 : 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
    }
    B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];

    /* availability of the three candidates (picture edges, and in
     * field mode additionally intra neighbours) */
    a_valid = !s->first_slice_line || (n == 2 || n == 3);
    b_valid = a_valid && (s->mb_width > 1);
    c_valid = s->mb_x || (n == 1 || n == 3);
    if (v->field_mode) {
        a_valid = a_valid && !is_intra[xy - wrap];
        b_valid = b_valid && !is_intra[xy - wrap + off];
        c_valid = c_valid && !is_intra[xy - 1];
    }

    /* gather candidate MVs and count how many reference the same vs.
     * the opposite field (mv_f flag per block) */
    if (a_valid) {
        a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
        num_oppfield += a_f;
        num_samefield += 1 - a_f;
        field_predA[0] = A[0];
        field_predA[1] = A[1];
    } else {
        field_predA[0] = field_predA[1] = 0;
        a_f = 0;
    }
    if (b_valid) {
        b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
        num_oppfield += b_f;
        num_samefield += 1 - b_f;
        field_predB[0] = B[0];
        field_predB[1] = B[1];
    } else {
        field_predB[0] = field_predB[1] = 0;
        b_f = 0;
    }
    if (c_valid) {
        c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
        num_oppfield += c_f;
        num_samefield += 1 - c_f;
        field_predC[0] = C[0];
        field_predC[1] = C[1];
    } else {
        field_predC[0] = field_predC[1] = 0;
        c_f = 0;
    }

    /* decide which reference field this block predicts from: the
     * dominant field among the candidates, possibly overridden by the
     * decoded pred_flag */
    if (v->field_mode) {
        if (num_samefield <= num_oppfield)
            opposit = 1 - pred_flag;
        else
            opposit = pred_flag;
    } else
        opposit = 0;
    if (opposit) {
        /* predicting from the opposite field: rescale candidates that
         * reference the same field */
        if (a_valid && !a_f) {
            field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
            field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
        }
        if (b_valid && !b_f) {
            field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
            field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
        }
        if (c_valid && !c_f) {
            field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
            field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 1;
        v->ref_field_type[dir] = !v->cur_field_type;
    } else {
        /* predicting from the same field: rescale opposite-field
         * candidates */
        if (a_valid && a_f) {
            field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
            field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
        }
        if (b_valid && b_f) {
            field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
            field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
        }
        if (c_valid && c_f) {
            field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
            field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 0;
        v->ref_field_type[dir] = v->cur_field_type;
    }

    /* single available candidate (priority A, then C, then B) ... */
    if (a_valid) {
        px = field_predA[0];
        py = field_predA[1];
    } else if (c_valid) {
        px = field_predC[0];
        py = field_predC[1];
    } else if (b_valid) {
        px = field_predB[0];
        py = field_predB[1];
    } else {
        px = 0;
        py = 0;
    }

    /* ... otherwise the component-wise median of the three */
    if (num_samefield + num_oppfield > 1) {
        px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
        py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
    }

    /* Pullback MV as specified in 8.3.5.3.4 */
    if (!v->field_mode) {
        int qx, qy, X, Y;
        qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
        X = (s->mb_width << 6) - 4;
        Y = (s->mb_height << 6) - 4;
        if (mv1) {
            if (qx + px < -60) px = -60 - qx;
            if (qy + py < -60) py = -60 - qy;
        } else {
            if (qx + px < -28) px = -28 - qx;
            if (qy + py < -28) py = -28 - qy;
        }
        if (qx + px > X) px = X - qx;
        if (qy + py > Y) py = Y - qy;
    }

    if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
        hybridmv_thresh = 32;
        if (a_valid && c_valid) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
            if (sum > hybridmv_thresh) {
                if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
                    px = field_predA[0];
                    py = field_predA[1];
                } else {
                    px = field_predC[0];
                    py = field_predC[1];
                }
            } else {
                /* A was close to the median; check C the same way */
                if (is_intra[xy - 1])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
                if (sum > hybridmv_thresh) {
                    if (get_bits1(&s->gb)) {
                        px = field_predA[0];
                        py = field_predA[1];
                    } else {
                        px = field_predC[0];
                        py = field_predC[1];
                    }
                }
            }
        }
    }

    /* adjust the wrap range for field pictures */
    if (v->field_mode && !s->quarter_sample) {
        r_x <<= 1;
        r_y <<= 1;
    }
    if (v->field_mode && v->numref)
        r_y >>= 1;
    if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
        y_bias = 1;
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
    if (mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
        v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
        v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
    }
}
1602 
/** Predict and set motion vector for interlaced frame picture MBs
 *
 * Builds the predictor for block n of an interlaced-frame MB from
 * neighbours A (left), B (above) and C (above-right, or above-left at
 * the right picture edge), averaging field-MV candidates against
 * frame-MV blocks where the two types meet, then stores predictor +
 * differential wrapped into the range r_x/r_y.  mvn selects the MB MV
 * layout: 1 = 1-MV (duplicate to all four blocks), 2 = 2-field-MV
 * (duplicate horizontally).
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposit
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra MB: zero both directions' MVs and return */
        s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
        s->current_picture.f.motion_val[1][xy][0] = 0;
        s->current_picture.f.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.f.motion_val[0][xy + 1][0] = 0;
            s->current_picture.f.motion_val[0][xy + 1][1] = 0;
            s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.f.motion_val[1][xy + 1][0] = 0;
            s->current_picture.f.motion_val[1][xy + 1][1] = 0;
            s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset towards the other field's block of the same MB */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
            A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
                    + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
                    + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left-column blocks: A is invalid if the left MB is intra */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj = n | 2;
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    /* candidate above has a field MV, we need a frame
                     * MV: average the two fields */
                    B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj = 2;
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* right picture edge: take C from above-left instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj = 3;
                            pos_c = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB predict from the MB's own top
         * blocks */
        pos_b = s->block_index[1];
        b_valid = 1;
        B[0] = s->current_picture.f.motion_val[0][pos_b][0];
        B[1] = s->current_picture.f.motion_val[0][pos_b][1];
        pos_c = s->block_index[0];
        c_valid = 1;
        C[0] = s->current_picture.f.motion_val[0][pos_c][0];
        C[1] = s->current_picture.f.motion_val[0][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median (or B directly when the picture
         * is one MB wide) */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: classify each candidate by the field its
         * vertical component points to (bit 2 of y), then pick from the
         * dominant field set */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                }
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.f.motion_val[0][xy + 1       ][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + 1       ][1] = s->current_picture.f.motion_val[0][xy][1];
        s->current_picture.f.motion_val[0][xy + wrap    ][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + wrap    ][1] = s->current_picture.f.motion_val[0][xy][1];
        s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
        s->mv[0][n + 1][0] = s->mv[0][n][0];
        s->mv[0][n + 1][1] = s->mv[0][n][1];
    }
}
1828 
1832 {
1833  MpegEncContext *s = &v->s;
1834  DSPContext *dsp = &v->s.dsp;
1835  uint8_t *srcY, *srcU, *srcV;
1836  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1837  int off, off_uv;
1838  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1839 
1840  if (!v->field_mode && !v->s.next_picture.f.data[0])
1841  return;
1842 
1843  mx = s->mv[1][0][0];
1844  my = s->mv[1][0][1];
1845  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1846  uvmy = (my + ((my & 3) == 3)) >> 1;
1847  if (v->field_mode) {
1848  if (v->cur_field_type != v->ref_field_type[1])
1849  my = my - 2 + 4 * v->cur_field_type;
1850  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1851  }
1852  if (v->fastuvmc) {
1853  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1854  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1855  }
1856  srcY = s->next_picture.f.data[0];
1857  srcU = s->next_picture.f.data[1];
1858  srcV = s->next_picture.f.data[2];
1859 
1860  src_x = s->mb_x * 16 + (mx >> 2);
1861  src_y = s->mb_y * 16 + (my >> 2);
1862  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1863  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1864 
1865  if (v->profile != PROFILE_ADVANCED) {
1866  src_x = av_clip( src_x, -16, s->mb_width * 16);
1867  src_y = av_clip( src_y, -16, s->mb_height * 16);
1868  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1869  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1870  } else {
1871  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1872  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1873  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1874  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1875  }
1876 
1877  srcY += src_y * s->linesize + src_x;
1878  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1879  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1880 
1881  if (v->field_mode && v->ref_field_type[1]) {
1882  srcY += s->current_picture_ptr->f.linesize[0];
1883  srcU += s->current_picture_ptr->f.linesize[1];
1884  srcV += s->current_picture_ptr->f.linesize[2];
1885  }
1886 
1887  /* for grayscale we should not try to read from unknown area */
1888  if (s->flags & CODEC_FLAG_GRAY) {
1889  srcU = s->edge_emu_buffer + 18 * s->linesize;
1890  srcV = s->edge_emu_buffer + 18 * s->linesize;
1891  }
1892 
1893  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
1894  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
1895  || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
1896  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1897 
1898  srcY -= s->mspel * (1 + s->linesize);
1899  s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
1900  17 + s->mspel * 2, 17 + s->mspel * 2,
1901  src_x - s->mspel, src_y - s->mspel,
1902  s->h_edge_pos, v_edge_pos);
1903  srcY = s->edge_emu_buffer;
1904  s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
1905  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1906  s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
1907  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1908  srcU = uvbuf;
1909  srcV = uvbuf + 16;
1910  /* if we deal with range reduction we need to scale source blocks */
1911  if (v->rangeredfrm) {
1912  int i, j;
1913  uint8_t *src, *src2;
1914 
1915  src = srcY;
1916  for (j = 0; j < 17 + s->mspel * 2; j++) {
1917  for (i = 0; i < 17 + s->mspel * 2; i++)
1918  src[i] = ((src[i] - 128) >> 1) + 128;
1919  src += s->linesize;
1920  }
1921  src = srcU;
1922  src2 = srcV;
1923  for (j = 0; j < 9; j++) {
1924  for (i = 0; i < 9; i++) {
1925  src[i] = ((src[i] - 128) >> 1) + 128;
1926  src2[i] = ((src2[i] - 128) >> 1) + 128;
1927  }
1928  src += s->uvlinesize;
1929  src2 += s->uvlinesize;
1930  }
1931  }
1932  srcY += s->mspel * (1 + s->linesize);
1933  }
1934 
1935  if (v->field_mode && v->cur_field_type) {
1936  off = s->current_picture_ptr->f.linesize[0];
1937  off_uv = s->current_picture_ptr->f.linesize[1];
1938  } else {
1939  off = 0;
1940  off_uv = 0;
1941  }
1942 
1943  if (s->mspel) {
1944  dxy = ((my & 3) << 2) | (mx & 3);
1945  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
1946  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
1947  srcY += s->linesize * 8;
1948  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
1949  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1950  } else { // hpel mc
1951  dxy = (my & 2) | ((mx & 2) >> 1);
1952 
1953  if (!v->rnd)
1954  dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1955  else
1956  dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1957  }
1958 
1959  if (s->flags & CODEC_FLAG_GRAY) return;
1960  /* Chroma MC always uses qpel blilinear */
1961  uvmx = (uvmx & 3) << 1;
1962  uvmy = (uvmy & 3) << 1;
1963  if (!v->rnd) {
1964  dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1965  dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1966  } else {
1967  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1968  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1969  }
1970 }
1971 
/* No-op fallbacks so this block also compiles in isolation; inside the real
 * file libavutil/vc1.h already define both, so these #ifndef guards change
 * nothing. */
#ifndef av_always_inline
#define av_always_inline inline
#endif
#ifndef B_FRACTION_DEN
#define B_FRACTION_DEN 256
#endif

/**
 * Scale a co-located motion vector component by the B-frame fraction.
 * @param value MV component taken from the anchor frame
 * @param bfrac B fraction numerator (denominator is B_FRACTION_DEN)
 * @param inv   scale for the backward reference (fraction - 1)
 * @param qs    nonzero when the picture uses quarter-pel precision
 * @return the scaled component (half-pel result is doubled back to quad-pel units)
 */
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
{
    const int num = inv ? bfrac - B_FRACTION_DEN : bfrac;

#if B_FRACTION_DEN == 256
    if (qs)
        return (value * num + 128) >> 8;
    return 2 * ((value * num + 255) >> 9);
#else
    if (qs)
        return (value * num + B_FRACTION_DEN / 2) / B_FRACTION_DEN;
    return 2 * ((value * num + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
#endif
}
1990 
/* No-op fallback so this block also compiles in isolation; libavutil
 * already defines av_always_inline inside the real file. */
#ifndef av_always_inline
#define av_always_inline inline
#endif

/**
 * Scale a motion vector component by the B fraction, interlaced-field variant.
 * @param value   MV component taken from the anchor picture
 * @param bfrac   B fraction numerator, denominator fixed at 256
 * @param inv     scale for the backward reference (fraction - 1)
 * @param qs      nonzero when the current picture is quarter-pel
 * @param qs_last nonzero when the reference picture is quarter-pel
 * @return the scaled MV component
 */
static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
                                           int qs, int qs_last)
{
    int n = bfrac;

    if (inv)
        n -= 256;
    /* Double the fraction when the reference was not quarter-pel.
     * Written as a multiply instead of the original "n <<= !qs_last":
     * left-shifting a negative value is undefined behavior in C; the
     * multiply produces the same result on every valid input. */
    n *= 1 << !qs_last;
    if (!qs)
        return (value * n + 255) >> 9;
    else
        return (value * n + 128) >> 8;
}
2004 
/** Perform motion compensation for a B-frame macroblock.
 * While intensity compensation (v->use_ic) is active, the current MV mode is
 * saved in mv_mode2 before each MC call and restored afterwards.
 * NOTE(review): original line 2012, inside the first use_ic block, was dropped
 * by this extraction (presumably it switches v->mv_mode for the MC calls) -
 * confirm against upstream vc1dec.c.
 * @param mode one of the BMV_TYPE_* prediction modes (ignored when direct)
 */
2007 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2008  int direct, int mode)
2009 {
2010  if (v->use_ic) {
2011  v->mv_mode2 = v->mv_mode;
2013  }
     /* direct and interpolated modes both do forward MC then blend in the
      * backward prediction via vc1_interp_mc() */
2014  if (direct) {
2015  vc1_mc_1mv(v, 0);
2016  vc1_interp_mc(v);
2017  if (v->use_ic)
2018  v->mv_mode = v->mv_mode2;
2019  return;
2020  }
2021  if (mode == BMV_TYPE_INTERPOLATED) {
2022  vc1_mc_1mv(v, 0);
2023  vc1_interp_mc(v);
2024  if (v->use_ic)
2025  v->mv_mode = v->mv_mode2;
2026  return;
2027  }
2028 
     /* single-direction prediction: the flag selects the backward reference */
2029  if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2030  v->mv_mode = v->mv_mode2;
2031  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2032  if (v->use_ic)
2033  v->mv_mode = v->mv_mode2;
2034 }
2035 
2036 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2037  int direct, int mvtype)
2038 {
2039  MpegEncContext *s = &v->s;
2040  int xy, wrap, off = 0;
2041  int16_t *A, *B, *C;
2042  int px, py;
2043  int sum;
2044  int r_x, r_y;
2045  const uint8_t *is_intra = v->mb_type[0];
2046 
2047  r_x = v->range_x;
2048  r_y = v->range_y;
2049  /* scale MV difference to be quad-pel */
2050  dmv_x[0] <<= 1 - s->quarter_sample;
2051  dmv_y[0] <<= 1 - s->quarter_sample;
2052  dmv_x[1] <<= 1 - s->quarter_sample;
2053  dmv_y[1] <<= 1 - s->quarter_sample;
2054 
2055  wrap = s->b8_stride;
2056  xy = s->block_index[0];
2057 
2058  if (s->mb_intra) {
2059  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2060  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2061  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2062  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2063  return;
2064  }
2065  if (!v->field_mode) {
2066  s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2067  s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2068  s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2069  s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2070 
2071  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2072  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2073  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2074  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2075  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2076  }
2077  if (direct) {
2078  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2079  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2080  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2081  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2082  return;
2083  }
2084 
2085  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2086  C = s->current_picture.f.motion_val[0][xy - 2];
2087  A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2088  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2089  B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2090 
2091  if (!s->mb_x) C[0] = C[1] = 0;
2092  if (!s->first_slice_line) { // predictor A is not out of bounds
2093  if (s->mb_width == 1) {
2094  px = A[0];
2095  py = A[1];
2096  } else {
2097  px = mid_pred(A[0], B[0], C[0]);
2098  py = mid_pred(A[1], B[1], C[1]);
2099  }
2100  } else if (s->mb_x) { // predictor C is not out of bounds
2101  px = C[0];
2102  py = C[1];
2103  } else {
2104  px = py = 0;
2105  }
2106  /* Pullback MV as specified in 8.3.5.3.4 */
2107  {
2108  int qx, qy, X, Y;
2109  if (v->profile < PROFILE_ADVANCED) {
2110  qx = (s->mb_x << 5);
2111  qy = (s->mb_y << 5);
2112  X = (s->mb_width << 5) - 4;
2113  Y = (s->mb_height << 5) - 4;
2114  if (qx + px < -28) px = -28 - qx;
2115  if (qy + py < -28) py = -28 - qy;
2116  if (qx + px > X) px = X - qx;
2117  if (qy + py > Y) py = Y - qy;
2118  } else {
2119  qx = (s->mb_x << 6);
2120  qy = (s->mb_y << 6);
2121  X = (s->mb_width << 6) - 4;
2122  Y = (s->mb_height << 6) - 4;
2123  if (qx + px < -60) px = -60 - qx;
2124  if (qy + py < -60) py = -60 - qy;
2125  if (qx + px > X) px = X - qx;
2126  if (qy + py > Y) py = Y - qy;
2127  }
2128  }
2129  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2130  if (0 && !s->first_slice_line && s->mb_x) {
2131  if (is_intra[xy - wrap])
2132  sum = FFABS(px) + FFABS(py);
2133  else
2134  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2135  if (sum > 32) {
2136  if (get_bits1(&s->gb)) {
2137  px = A[0];
2138  py = A[1];
2139  } else {
2140  px = C[0];
2141  py = C[1];
2142  }
2143  } else {
2144  if (is_intra[xy - 2])
2145  sum = FFABS(px) + FFABS(py);
2146  else
2147  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2148  if (sum > 32) {
2149  if (get_bits1(&s->gb)) {
2150  px = A[0];
2151  py = A[1];
2152  } else {
2153  px = C[0];
2154  py = C[1];
2155  }
2156  }
2157  }
2158  }
2159  /* store MV using signed modulus of MV range defined in 4.11 */
2160  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2161  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2162  }
2163  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2164  C = s->current_picture.f.motion_val[1][xy - 2];
2165  A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2166  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2167  B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2168 
2169  if (!s->mb_x)
2170  C[0] = C[1] = 0;
2171  if (!s->first_slice_line) { // predictor A is not out of bounds
2172  if (s->mb_width == 1) {
2173  px = A[0];
2174  py = A[1];
2175  } else {
2176  px = mid_pred(A[0], B[0], C[0]);
2177  py = mid_pred(A[1], B[1], C[1]);
2178  }
2179  } else if (s->mb_x) { // predictor C is not out of bounds
2180  px = C[0];
2181  py = C[1];
2182  } else {
2183  px = py = 0;
2184  }
2185  /* Pullback MV as specified in 8.3.5.3.4 */
2186  {
2187  int qx, qy, X, Y;
2188  if (v->profile < PROFILE_ADVANCED) {
2189  qx = (s->mb_x << 5);
2190  qy = (s->mb_y << 5);
2191  X = (s->mb_width << 5) - 4;
2192  Y = (s->mb_height << 5) - 4;
2193  if (qx + px < -28) px = -28 - qx;
2194  if (qy + py < -28) py = -28 - qy;
2195  if (qx + px > X) px = X - qx;
2196  if (qy + py > Y) py = Y - qy;
2197  } else {
2198  qx = (s->mb_x << 6);
2199  qy = (s->mb_y << 6);
2200  X = (s->mb_width << 6) - 4;
2201  Y = (s->mb_height << 6) - 4;
2202  if (qx + px < -60) px = -60 - qx;
2203  if (qy + py < -60) py = -60 - qy;
2204  if (qx + px > X) px = X - qx;
2205  if (qy + py > Y) py = Y - qy;
2206  }
2207  }
2208  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2209  if (0 && !s->first_slice_line && s->mb_x) {
2210  if (is_intra[xy - wrap])
2211  sum = FFABS(px) + FFABS(py);
2212  else
2213  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2214  if (sum > 32) {
2215  if (get_bits1(&s->gb)) {
2216  px = A[0];
2217  py = A[1];
2218  } else {
2219  px = C[0];
2220  py = C[1];
2221  }
2222  } else {
2223  if (is_intra[xy - 2])
2224  sum = FFABS(px) + FFABS(py);
2225  else
2226  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2227  if (sum > 32) {
2228  if (get_bits1(&s->gb)) {
2229  px = A[0];
2230  py = A[1];
2231  } else {
2232  px = C[0];
2233  py = C[1];
2234  }
2235  }
2236  }
2237  }
2238  /* store MV using signed modulus of MV range defined in 4.11 */
2239 
2240  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2241  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2242  }
2243  s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2244  s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2245  s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2246  s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
2247 }
2248 
2249 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2250 {
2251  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2252  MpegEncContext *s = &v->s;
2253  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2254 
2255  if (v->bmvtype == BMV_TYPE_DIRECT) {
2256  int total_opp, k, f;
2257  if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2258  s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2259  v->bfraction, 0, s->quarter_sample, v->qs_last);
2260  s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2261  v->bfraction, 0, s->quarter_sample, v->qs_last);
2262  s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2263  v->bfraction, 1, s->quarter_sample, v->qs_last);
2264  s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2265  v->bfraction, 1, s->quarter_sample, v->qs_last);
2266 
2267  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2268  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2269  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2270  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2271  f = (total_opp > 2) ? 1 : 0;
2272  } else {
2273  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2274  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2275  f = 0;
2276  }
2277  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2278  for (k = 0; k < 4; k++) {
2279  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2280  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2281  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2282  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2283  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2284  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2285  }
2286  return;
2287  }
2288  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2289  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2290  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2291  return;
2292  }
2293  if (dir) { // backward
2294  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2295  if (n == 3 || mv1) {
2296  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2297  }
2298  } else { // forward
2299  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2300  if (n == 3 || mv1) {
2301  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2302  }
2303  }
2304 }
2305 
2315 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2316  int16_t **dc_val_ptr, int *dir_ptr)
2317 {
2318  int a, b, c, wrap, pred, scale;
2319  int16_t *dc_val;
2320  static const uint16_t dcpred[32] = {
2321  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2322  114, 102, 93, 85, 79, 73, 68, 64,
2323  60, 57, 54, 51, 49, 47, 45, 43,
2324  41, 39, 38, 37, 35, 34, 33
2325  };
2326 
2327  /* find prediction - wmv3_dc_scale always used here in fact */
2328  if (n < 4) scale = s->y_dc_scale;
2329  else scale = s->c_dc_scale;
2330 
2331  wrap = s->block_wrap[n];
2332  dc_val = s->dc_val[0] + s->block_index[n];
2333 
2334  /* B A
2335  * C X
2336  */
2337  c = dc_val[ - 1];
2338  b = dc_val[ - 1 - wrap];
2339  a = dc_val[ - wrap];
2340 
2341  if (pq < 9 || !overlap) {
2342  /* Set outer values */
2343  if (s->first_slice_line && (n != 2 && n != 3))
2344  b = a = dcpred[scale];
2345  if (s->mb_x == 0 && (n != 1 && n != 3))
2346  b = c = dcpred[scale];
2347  } else {
2348  /* Set outer values */
2349  if (s->first_slice_line && (n != 2 && n != 3))
2350  b = a = 0;
2351  if (s->mb_x == 0 && (n != 1 && n != 3))
2352  b = c = 0;
2353  }
2354 
2355  if (abs(a - b) <= abs(b - c)) {
2356  pred = c;
2357  *dir_ptr = 1; // left
2358  } else {
2359  pred = a;
2360  *dir_ptr = 0; // top
2361  }
2362 
2363  /* update predictor */
2364  *dc_val_ptr = &dc_val[0];
2365  return pred;
2366 }
2367 
2368 
2380 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2381  int a_avail, int c_avail,
2382  int16_t **dc_val_ptr, int *dir_ptr)
2383 {
2384  int a, b, c, wrap, pred;
2385  int16_t *dc_val;
2386  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2387  int q1, q2 = 0;
2388  int dqscale_index;
2389 
2390  wrap = s->block_wrap[n];
2391  dc_val = s->dc_val[0] + s->block_index[n];
2392 
2393  /* B A
2394  * C X
2395  */
2396  c = dc_val[ - 1];
2397  b = dc_val[ - 1 - wrap];
2398  a = dc_val[ - wrap];
2399  /* scale predictors if needed */
2400  q1 = s->current_picture.f.qscale_table[mb_pos];
2401  dqscale_index = s->y_dc_scale_table[q1] - 1;
2402  if (dqscale_index < 0)
2403  return 0;
2404  if (c_avail && (n != 1 && n != 3)) {
2405  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2406  if (q2 && q2 != q1)
2407  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2408  }
2409  if (a_avail && (n != 2 && n != 3)) {
2410  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2411  if (q2 && q2 != q1)
2412  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2413  }
2414  if (a_avail && c_avail && (n != 3)) {
2415  int off = mb_pos;
2416  if (n != 1)
2417  off--;
2418  if (n != 2)
2419  off -= s->mb_stride;
2420  q2 = s->current_picture.f.qscale_table[off];
2421  if (q2 && q2 != q1)
2422  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2423  }
2424 
2425  if (a_avail && c_avail) {
2426  if (abs(a - b) <= abs(b - c)) {
2427  pred = c;
2428  *dir_ptr = 1; // left
2429  } else {
2430  pred = a;
2431  *dir_ptr = 0; // top
2432  }
2433  } else if (a_avail) {
2434  pred = a;
2435  *dir_ptr = 0; // top
2436  } else if (c_avail) {
2437  pred = c;
2438  *dir_ptr = 1; // left
2439  } else {
2440  pred = 0;
2441  *dir_ptr = 1; // left
2442  }
2443 
2444  /* update predictor */
2445  *dc_val_ptr = &dc_val[0];
2446  return pred;
2447 }
2448  // Block group
2450 
2457 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2458  uint8_t **coded_block_ptr)
2459 {
2460  int xy, wrap, pred, a, b, c;
2461 
2462  xy = s->block_index[n];
2463  wrap = s->b8_stride;
2464 
2465  /* B C
2466  * A X
2467  */
2468  a = s->coded_block[xy - 1 ];
2469  b = s->coded_block[xy - 1 - wrap];
2470  c = s->coded_block[xy - wrap];
2471 
2472  if (b == c) {
2473  pred = a;
2474  } else {
2475  pred = c;
2476  }
2477 
2478  /* store value */
2479  *coded_block_ptr = &s->coded_block[xy];
2480 
2481  return pred;
2482 }
2483 
/** Decode one AC coefficient as a (last, run, level) triple.
 * Handles the regular VLC path plus the three VC-1 escape modes; results are
 * returned through the last/skip/value out-parameters.
 * @param codingset selects which VLC / delta tables to use
 */
2493 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2494  int *value, int codingset)
2495 {
2496  GetBitContext *gb = &v->s.gb;
2497  int index, escape, run = 0, level = 0, lst = 0;
2498 
2499  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2500  if (index != ff_vc1_ac_sizes[codingset] - 1) {
     /* regular (non-escape) code: run/level straight from the index tables;
      * a bitstream overread also forces "last" to terminate the block */
2501  run = vc1_index_decode_table[codingset][index][0];
2502  level = vc1_index_decode_table[codingset][index][1];
2503  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2504  if (get_bits1(gb))
2505  level = -level;
2506  } else {
     /* escape coding: decode210() selects one of three escape modes */
2507  escape = decode210(gb);
2508  if (escape != 2) {
     /* modes 0/1: decode another regular code, then add a level (mode 0)
      * or run (mode 1) delta from the corresponding table */
2509  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2510  run = vc1_index_decode_table[codingset][index][0];
2511  level = vc1_index_decode_table[codingset][index][1];
2512  lst = index >= vc1_last_decode_table[codingset];
2513  if (escape == 0) {
2514  if (lst)
2515  level += vc1_last_delta_level_table[codingset][run];
2516  else
2517  level += vc1_delta_level_table[codingset][run];
2518  } else {
2519  if (lst)
2520  run += vc1_last_delta_run_table[codingset][level] + 1;
2521  else
2522  run += vc1_delta_run_table[codingset][level] + 1;
2523  }
2524  if (get_bits1(gb))
2525  level = -level;
2526  } else {
     /* mode 2: fixed-length run/level; the field widths are decoded once
      * and then cached in esc3_level_length / esc3_run_length */
2527  int sign;
2528  lst = get_bits1(gb);
2529  if (v->s.esc3_level_length == 0) {
2530  if (v->pq < 8 || v->dquantfrm) { // table 59
2531  v->s.esc3_level_length = get_bits(gb, 3);
2532  if (!v->s.esc3_level_length)
2533  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2534  } else { // table 60
2535  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2536  }
2537  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2538  }
2539  run = get_bits(gb, v->s.esc3_run_length);
2540  sign = get_bits1(gb);
2541  level = get_bits(gb, v->s.esc3_level_length);
2542  if (sign)
2543  level = -level;
2544  }
2545  }
2546 
2547  *last = lst;
2548  *skip = run;
2549  *value = level;
2550 }
2551 
/** Decode an intra block (simple/main profile I frames).
 * Reads the DC differential and the AC run/level pairs, applies DC and AC
 * prediction, and inverse-quantizes the coefficients into block[].
 * NOTE(review): original lines 2572 and 2574 - the get_vlc2() reads that set
 * dcdiff from the luma/chroma DC VLC tables - were dropped by this source
 * extraction; confirm against upstream vc1dec.c.
 * @return 0 on success, -1 on an illegal DC VLC
 */
2559 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2560  int coded, int codingset)
2561 {
2562  GetBitContext *gb = &v->s.gb;
2563  MpegEncContext *s = &v->s;
2564  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2565  int i;
2566  int16_t *dc_val;
2567  int16_t *ac_val, *ac_val2;
2568  int dcdiff;
2569 
2570  /* Get DC differential */
2571  if (n < 4) {
2573  } else {
2575  }
2576  if (dcdiff < 0) {
2577  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2578  return -1;
2579  }
2580  if (dcdiff) {
2581  if (dcdiff == 119 /* ESC index value */) {
2582  /* TODO: Optimize */
2583  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2584  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2585  else dcdiff = get_bits(gb, 8);
2586  } else {
2587  if (v->pq == 1)
2588  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2589  else if (v->pq == 2)
2590  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2591  }
2592  if (get_bits1(gb))
2593  dcdiff = -dcdiff;
2594  }
2595 
2596  /* Prediction */
2597  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2598  *dc_val = dcdiff;
2599 
2600  /* Store the quantized DC coeff, used for prediction */
2601  if (n < 4) {
2602  block[0] = dcdiff * s->y_dc_scale;
2603  } else {
2604  block[0] = dcdiff * s->c_dc_scale;
2605  }
2606  /* Skip ? */
2607  if (!coded) {
2608  goto not_coded;
2609  }
2610 
2611  // AC Decoding
2612  i = 1;
2613 
2614  {
2615  int last = 0, skip, value;
2616  const uint8_t *zz_table;
2617  int scale;
2618  int k;
2619 
2620  scale = v->pq * 2 + v->halfpq;
2621 
     /* scan order depends on the AC prediction direction */
2622  if (v->s.ac_pred) {
2623  if (!dc_pred_dir)
2624  zz_table = v->zz_8x8[2];
2625  else
2626  zz_table = v->zz_8x8[3];
2627  } else
2628  zz_table = v->zz_8x8[1];
2629 
2630  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2631  ac_val2 = ac_val;
2632  if (dc_pred_dir) // left
2633  ac_val -= 16;
2634  else // top
2635  ac_val -= 16 * s->block_wrap[n];
2636 
2637  while (!last) {
2638  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2639  i += skip;
2640  if (i > 63)
2641  break;
2642  block[zz_table[i++]] = value;
2643  }
2644 
2645  /* apply AC prediction if needed */
2646  if (s->ac_pred) {
2647  if (dc_pred_dir) { // left
2648  for (k = 1; k < 8; k++)
2649  block[k << v->left_blk_sh] += ac_val[k];
2650  } else { // top
2651  for (k = 1; k < 8; k++)
2652  block[k << v->top_blk_sh] += ac_val[k + 8];
2653  }
2654  }
2655  /* save AC coeffs for further prediction */
2656  for (k = 1; k < 8; k++) {
2657  ac_val2[k] = block[k << v->left_blk_sh];
2658  ac_val2[k + 8] = block[k << v->top_blk_sh];
2659  }
2660 
2661  /* scale AC coeffs */
2662  for (k = 1; k < 64; k++)
2663  if (block[k]) {
2664  block[k] *= scale;
2665  if (!v->pquantizer)
2666  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2667  }
2668 
2669  if (s->ac_pred) i = 63;
2670  }
2671 
2672 not_coded:
2673  if (!coded) {
2674  int k, scale;
2675  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2676  ac_val2 = ac_val;
2677 
2678  i = 0;
2679  scale = v->pq * 2 + v->halfpq;
2680  memset(ac_val2, 0, 16 * 2);
2681  if (dc_pred_dir) { // left
2682  ac_val -= 16;
2683  if (s->ac_pred)
2684  memcpy(ac_val2, ac_val, 8 * 2);
2685  } else { // top
2686  ac_val -= 16 * s->block_wrap[n];
2687  if (s->ac_pred)
2688  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2689  }
2690 
2691  /* apply AC prediction if needed */
2692  if (s->ac_pred) {
2693  if (dc_pred_dir) { //left
2694  for (k = 1; k < 8; k++) {
2695  block[k << v->left_blk_sh] = ac_val[k] * scale;
2696  if (!v->pquantizer && block[k << v->left_blk_sh])
2697  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2698  }
2699  } else { // top
2700  for (k = 1; k < 8; k++) {
2701  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2702  if (!v->pquantizer && block[k << v->top_blk_sh])
2703  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2704  }
2705  }
2706  i = 63;
2707  }
2708  }
2709  s->block_last_index[n] = i;
2710 
2711  return 0;
2712 }
2713 
/** Decode an intra block, advanced variant with per-MB quantizer (mquant).
 * NOTE(review): the signature line naming this function (upstream it is
 * vc1_decode_i_block_adv) and the DC VLC reads at original lines 2740/2742
 * were dropped by this source extraction; confirm against upstream vc1dec.c.
 * Reads the DC differential, applies DC/AC prediction (rescaling stored AC
 * predictors when the neighbour used a different quantizer) and
 * inverse-quantizes into block[].
 * @return 0 on success, -1 on an illegal DC VLC, AVERROR_INVALIDDATA on a
 *         bad quantizer combination
 */
2723 int coded, int codingset, int mquant)
2724 {
2725  GetBitContext *gb = &v->s.gb;
2726  MpegEncContext *s = &v->s;
2727  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2728  int i;
2729  int16_t *dc_val;
2730  int16_t *ac_val, *ac_val2;
2731  int dcdiff;
2732  int a_avail = v->a_avail, c_avail = v->c_avail;
2733  int use_pred = s->ac_pred;
2734  int scale;
2735  int q1, q2 = 0;
2736  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2737 
2738  /* Get DC differential */
2739  if (n < 4) {
2741  } else {
2743  }
2744  if (dcdiff < 0) {
2745  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2746  return -1;
2747  }
2748  if (dcdiff) {
2749  if (dcdiff == 119 /* ESC index value */) {
2750  /* TODO: Optimize */
2751  if (mquant == 1) dcdiff = get_bits(gb, 10);
2752  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2753  else dcdiff = get_bits(gb, 8);
2754  } else {
2755  if (mquant == 1)
2756  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2757  else if (mquant == 2)
2758  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2759  }
2760  if (get_bits1(gb))
2761  dcdiff = -dcdiff;
2762  }
2763 
2764  /* Prediction */
2765  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2766  *dc_val = dcdiff;
2767 
2768  /* Store the quantized DC coeff, used for prediction */
2769  if (n < 4) {
2770  block[0] = dcdiff * s->y_dc_scale;
2771  } else {
2772  block[0] = dcdiff * s->c_dc_scale;
2773  }
2774 
2775  //AC Decoding
2776  i = 1;
2777 
2778  /* check if AC is needed at all */
2779  if (!a_avail && !c_avail)
2780  use_pred = 0;
2781  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2782  ac_val2 = ac_val;
2783 
2784  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2785 
2786  if (dc_pred_dir) // left
2787  ac_val -= 16;
2788  else // top
2789  ac_val -= 16 * s->block_wrap[n];
2790 
     /* q2: the quantizer of the macroblock the AC predictors came from,
      * used below to rescale them to the current quantizer */
2791  q1 = s->current_picture.f.qscale_table[mb_pos];
2792  if ( dc_pred_dir && c_avail && mb_pos)
2793  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2794  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2795  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2796  if ( dc_pred_dir && n == 1)
2797  q2 = q1;
2798  if (!dc_pred_dir && n == 2)
2799  q2 = q1;
2800  if (n == 3)
2801  q2 = q1;
2802 
2803  if (coded) {
2804  int last = 0, skip, value;
2805  const uint8_t *zz_table;
2806  int k;
2807 
     /* scan order depends on AC prediction direction and frame coding mode */
2808  if (v->s.ac_pred) {
2809  if (!use_pred && v->fcm == ILACE_FRAME) {
2810  zz_table = v->zzi_8x8;
2811  } else {
2812  if (!dc_pred_dir) // top
2813  zz_table = v->zz_8x8[2];
2814  else // left
2815  zz_table = v->zz_8x8[3];
2816  }
2817  } else {
2818  if (v->fcm != ILACE_FRAME)
2819  zz_table = v->zz_8x8[1];
2820  else
2821  zz_table = v->zzi_8x8;
2822  }
2823 
2824  while (!last) {
2825  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2826  i += skip;
2827  if (i > 63)
2828  break;
2829  block[zz_table[i++]] = value;
2830  }
2831 
2832  /* apply AC prediction if needed */
2833  if (use_pred) {
2834  /* scale predictors if needed*/
2835  if (q2 && q1 != q2) {
2836  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2837  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2838 
2839  if (q1 < 1)
2840  return AVERROR_INVALIDDATA;
2841  if (dc_pred_dir) { // left
2842  for (k = 1; k < 8; k++)
2843  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2844  } else { // top
2845  for (k = 1; k < 8; k++)
2846  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2847  }
2848  } else {
2849  if (dc_pred_dir) { //left
2850  for (k = 1; k < 8; k++)
2851  block[k << v->left_blk_sh] += ac_val[k];
2852  } else { //top
2853  for (k = 1; k < 8; k++)
2854  block[k << v->top_blk_sh] += ac_val[k + 8];
2855  }
2856  }
2857  }
2858  /* save AC coeffs for further prediction */
2859  for (k = 1; k < 8; k++) {
2860  ac_val2[k ] = block[k << v->left_blk_sh];
2861  ac_val2[k + 8] = block[k << v->top_blk_sh];
2862  }
2863 
2864  /* scale AC coeffs */
2865  for (k = 1; k < 64; k++)
2866  if (block[k]) {
2867  block[k] *= scale;
2868  if (!v->pquantizer)
2869  block[k] += (block[k] < 0) ? -mquant : mquant;
2870  }
2871 
2872  if (use_pred) i = 63;
2873  } else { // no AC coeffs
2874  int k;
2875 
2876  memset(ac_val2, 0, 16 * 2);
2877  if (dc_pred_dir) { // left
2878  if (use_pred) {
2879  memcpy(ac_val2, ac_val, 8 * 2);
2880  if (q2 && q1 != q2) {
2881  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2882  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2883  if (q1 < 1)
2884  return AVERROR_INVALIDDATA;
2885  for (k = 1; k < 8; k++)
2886  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2887  }
2888  }
2889  } else { // top
2890  if (use_pred) {
2891  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2892  if (q2 && q1 != q2) {
2893  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2894  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2895  if (q1 < 1)
2896  return AVERROR_INVALIDDATA;
2897  for (k = 1; k < 8; k++)
2898  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2899  }
2900  }
2901  }
2902 
2903  /* apply AC prediction if needed */
2904  if (use_pred) {
2905  if (dc_pred_dir) { // left
2906  for (k = 1; k < 8; k++) {
2907  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2908  if (!v->pquantizer && block[k << v->left_blk_sh])
2909  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2910  }
2911  } else { // top
2912  for (k = 1; k < 8; k++) {
2913  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2914  if (!v->pquantizer && block[k << v->top_blk_sh])
2915  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2916  }
2917  }
2918  i = 63;
2919  }
2920  }
2921  s->block_last_index[n] = i;
2922 
2923  return 0;
2924 }
2925 
2935  int coded, int mquant, int codingset)
2936 {
      /* Decode one intra block occurring inside a non-I picture:
       *  1. read the DC differential VLC and apply DC prediction,
       *  2. decode AC run/level pairs into `block` in zigzag order,
       *  3. optionally apply AC prediction from the left/top neighbour
       *     (rescaling the predictors when the neighbour used a
       *     different quantizer), then scale the AC coefficients.
       * Returns 0 on success, -1 / AVERROR_INVALIDDATA on bitstream error.
       * NOTE(review): this listing was extracted from a doxygen page;
       * hyperlinked lines (2961, 2963 — the DC-differential get_vlc2()
       * reads) are missing from this view. */
2937  GetBitContext *gb = &v->s.gb;
2938  MpegEncContext *s = &v->s;
2939  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2940  int i;
2941  int16_t *dc_val;
2942  int16_t *ac_val, *ac_val2;
2943  int dcdiff;
2944  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2945  int a_avail = v->a_avail, c_avail = v->c_avail;
2946  int use_pred = s->ac_pred;
2947  int scale;
2948  int q1, q2 = 0;
2949 
2950  s->dsp.clear_block(block);
2951 
2952  /* XXX: Guard against dumb values of mquant */
      /* NOTE(review): the low clamp maps mquant<1 to 0, not 1 — looks
       * intentional here but worth confirming against the spec. */
2953  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
2954 
2955  /* Set DC scale - y and c use the same */
2956  s->y_dc_scale = s->y_dc_scale_table[mquant];
2957  s->c_dc_scale = s->c_dc_scale_table[mquant];
2958 
2959  /* Get DC differential */
      /* blocks 0-3 are luma, 4-5 chroma; each uses its own DC VLC table
       * (the actual get_vlc2() reads are on lines elided from this extract) */
2960  if (n < 4) {
2962  } else {
2964  }
2965  if (dcdiff < 0) {
2966  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2967  return -1;
2968  }
2969  if (dcdiff) {
      /* index 119 is the escape code: the differential is then coded
       * as a fixed-length field whose width depends on mquant */
2970  if (dcdiff == 119 /* ESC index value */) {
2971  /* TODO: Optimize */
2972  if (mquant == 1) dcdiff = get_bits(gb, 10);
2973  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2974  else dcdiff = get_bits(gb, 8);
2975  } else {
2976  if (mquant == 1)
2977  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2978  else if (mquant == 2)
2979  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2980  }
      /* trailing sign bit */
2981  if (get_bits1(gb))
2982  dcdiff = -dcdiff;
2983  }
2984 
2985  /* Prediction */
2986  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2987  *dc_val = dcdiff;
2988 
2989  /* Store the quantized DC coeff, used for prediction */
2990 
2991  if (n < 4) {
2992  block[0] = dcdiff * s->y_dc_scale;
2993  } else {
2994  block[0] = dcdiff * s->c_dc_scale;
2995  }
2996 
2997  //AC Decoding
2998  i = 1;
2999 
3000  /* check if AC is needed at all and adjust direction if needed */
3001  if (!a_avail) dc_pred_dir = 1;
3002  if (!c_avail) dc_pred_dir = 0;
3003  if (!a_avail && !c_avail) use_pred = 0;
3004  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3005  ac_val2 = ac_val;
3006 
3007  scale = mquant * 2 + v->halfpq;
3008 
      /* point ac_val at the neighbour block's saved AC row/column */
3009  if (dc_pred_dir) //left
3010  ac_val -= 16;
3011  else //top
3012  ac_val -= 16 * s->block_wrap[n];
3013 
      /* q1 = this MB's quantizer, q2 = the predicting neighbour's;
       * needed to rescale AC predictors when they differ */
3014  q1 = s->current_picture.f.qscale_table[mb_pos];
3015  if (dc_pred_dir && c_avail && mb_pos)
3016  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3017  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3018  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
      /* neighbour blocks inside the same MB share this MB's quantizer */
3019  if ( dc_pred_dir && n == 1)
3020  q2 = q1;
3021  if (!dc_pred_dir && n == 2)
3022  q2 = q1;
3023  if (n == 3) q2 = q1;
3024 
3025  if (coded) {
3026  int last = 0, skip, value;
3027  int k;
3028 
3029  while (!last) {
3030  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3031  i += skip;
3032  if (i > 63)
3033  break;
      /* pick the zigzag table: progressive, interlaced-with-AC-pred
       * (direction-dependent), or plain interlaced */
3034  if (v->fcm == PROGRESSIVE)
3035  block[v->zz_8x8[0][i++]] = value;
3036  else {
3037  if (use_pred && (v->fcm == ILACE_FRAME)) {
3038  if (!dc_pred_dir) // top
3039  block[v->zz_8x8[2][i++]] = value;
3040  else // left
3041  block[v->zz_8x8[3][i++]] = value;
3042  } else {
3043  block[v->zzi_8x8[i++]] = value;
3044  }
3045  }
3046  }
3047 
3048  /* apply AC prediction if needed */
3049  if (use_pred) {
3050  /* scale predictors if needed*/
3051  if (q2 && q1 != q2) {
3052  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3053  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3054 
3055  if (q1 < 1)
3056  return AVERROR_INVALIDDATA;
3057  if (dc_pred_dir) { // left
3058  for (k = 1; k < 8; k++)
3059  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3060  } else { //top
3061  for (k = 1; k < 8; k++)
3062  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3063  }
3064  } else {
3065  if (dc_pred_dir) { // left
3066  for (k = 1; k < 8; k++)
3067  block[k << v->left_blk_sh] += ac_val[k];
3068  } else { // top
3069  for (k = 1; k < 8; k++)
3070  block[k << v->top_blk_sh] += ac_val[k + 8];
3071  }
3072  }
3073  }
3074  /* save AC coeffs for further prediction */
3075  for (k = 1; k < 8; k++) {
3076  ac_val2[k ] = block[k << v->left_blk_sh];
3077  ac_val2[k + 8] = block[k << v->top_blk_sh];
3078  }
3079 
3080  /* scale AC coeffs */
3081  for (k = 1; k < 64; k++)
3082  if (block[k]) {
3083  block[k] *= scale;
3084  if (!v->pquantizer)
3085  block[k] += (block[k] < 0) ? -mquant : mquant;
3086  }
3087 
      /* with AC prediction any coefficient may be non-zero */
3088  if (use_pred) i = 63;
3089  } else { // no AC coeffs
3090  int k;
3091 
3092  memset(ac_val2, 0, 16 * 2);
3093  if (dc_pred_dir) { // left
3094  if (use_pred) {
3095  memcpy(ac_val2, ac_val, 8 * 2);
3096  if (q2 && q1 != q2) {
3097  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3098  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3099  if (q1 < 1)
3100  return AVERROR_INVALIDDATA;
3101  for (k = 1; k < 8; k++)
3102  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3103  }
3104  }
3105  } else { // top
3106  if (use_pred) {
3107  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3108  if (q2 && q1 != q2) {
3109  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3110  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3111  if (q1 < 1)
3112  return AVERROR_INVALIDDATA;
3113  for (k = 1; k < 8; k++)
3114  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3115  }
3116  }
3117  }
3118 
3119  /* apply AC prediction if needed */
3120  if (use_pred) {
3121  if (dc_pred_dir) { // left
3122  for (k = 1; k < 8; k++) {
3123  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3124  if (!v->pquantizer && block[k << v->left_blk_sh])
3125  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3126  }
3127  } else { // top
3128  for (k = 1; k < 8; k++) {
3129  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3130  if (!v->pquantizer && block[k << v->top_blk_sh])
3131  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3132  }
3133  }
3134  i = 63;
3135  }
3136  }
      /* remember how far the coefficients extend, for the IDCT */
3137  s->block_last_index[n] = i;
3138 
3139  return 0;
3140 }
3141 
/* Decode an inter (P) block: select the block transform type (8x8, two
 * 8x4, two 4x8 or four 4x4), decode the AC coefficients of each coded
 * subblock, inverse-transform and add the result to `dst`.  Returns the
 * coded-subblock pattern (`pat`) and, via *ttmb_out, the transform type
 * used, packed per block index.
 * NOTE(review): doxygen-extracted listing — line 3160 (reading ttblk via
 * get_vlc2() when ttmb == -1) and line 3226 (the progressive 4x4 zigzag
 * lookup) are elided from this view. */
3144 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3145  int mquant, int ttmb, int first_block,
3146  uint8_t *dst, int linesize, int skip_block,
3147  int *ttmb_out)
3148 {
3149  MpegEncContext *s = &v->s;
3150  GetBitContext *gb = &s->gb;
3151  int i, j;
3152  int subblkpat = 0;
3153  int scale, off, idx, last, skip, value;
3154  int ttblk = ttmb & 7;
3155  int pat = 0;
3156 
3157  s->dsp.clear_block(block);
3158 
      /* ttmb == -1: transform type is signalled per block (read elided) */
3159  if (ttmb == -1) {
3161  }
3162  if (ttblk == TT_4X4) {
3163  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3164  }
3165  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3166  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3167  || (!v->res_rtm_flag && !first_block))) {
3168  subblkpat = decode012(gb);
3169  if (subblkpat)
3170  subblkpat ^= 3; // swap decoded pattern bits
3171  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3172  ttblk = TT_8X4;
3173  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3174  ttblk = TT_4X8;
3175  }
3176  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3177 
3178  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3179  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3180  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3181  ttblk = TT_8X4;
3182  }
3183  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3184  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3185  ttblk = TT_4X8;
3186  }
3187  switch (ttblk) {
3188  case TT_8X8:
3189  pat = 0xF;
3190  i = 0;
3191  last = 0;
3192  while (!last) {
3193  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3194  i += skip;
3195  if (i > 63)
3196  break;
      /* progressive vs interlaced zigzag order */
3197  if (!v->fcm)
3198  idx = v->zz_8x8[0][i++];
3199  else
3200  idx = v->zzi_8x8[i++];
3201  block[idx] = value * scale;
3202  if (!v->pquantizer)
3203  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3204  }
3205  if (!skip_block) {
      /* i == 1: only the DC coefficient was coded — use the fast path */
3206  if (i == 1)
3207  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3208  else {
3209  v->vc1dsp.vc1_inv_trans_8x8(block);
3210  s->dsp.add_pixels_clamped(block, dst, linesize);
3211  }
3212  }
3213  break;
3214  case TT_4X4:
3215  pat = ~subblkpat & 0xF;
      /* four 4x4 subblocks; subblkpat bit set => subblock not coded */
3216  for (j = 0; j < 4; j++) {
3217  last = subblkpat & (1 << (3 - j));
3218  i = 0;
3219  off = (j & 1) * 4 + (j & 2) * 16;
3220  while (!last) {
3221  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3222  i += skip;
3223  if (i > 15)
3224  break;
      /* (progressive 4x4 zigzag lookup elided from this extract) */
3225  if (!v->fcm)
3227  else
3228  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3229  block[idx + off] = value * scale;
3230  if (!v->pquantizer)
3231  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3232  }
3233  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3234  if (i == 1)
3235  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3236  else
3237  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3238  }
3239  }
3240  break;
3241  case TT_8X4:
3242  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3243  for (j = 0; j < 2; j++) {
3244  last = subblkpat & (1 << (1 - j));
3245  i = 0;
3246  off = j * 32;
3247  while (!last) {
3248  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3249  i += skip;
3250  if (i > 31)
3251  break;
3252  if (!v->fcm)
3253  idx = v->zz_8x4[i++] + off;
3254  else
3255  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3256  block[idx] = value * scale;
3257  if (!v->pquantizer)
3258  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3259  }
3260  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3261  if (i == 1)
3262  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3263  else
3264  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3265  }
3266  }
3267  break;
3268  case TT_4X8:
3269  pat = ~(subblkpat * 5) & 0xF;
3270  for (j = 0; j < 2; j++) {
3271  last = subblkpat & (1 << (1 - j));
3272  i = 0;
3273  off = j * 4;
3274  while (!last) {
3275  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3276  i += skip;
3277  if (i > 31)
3278  break;
3279  if (!v->fcm)
3280  idx = v->zz_4x8[i++] + off;
3281  else
3282  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3283  block[idx] = value * scale;
3284  if (!v->pquantizer)
3285  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3286  }
3287  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3288  if (i == 1)
3289  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3290  else
3291  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3292  }
3293  }
3294  break;
3295  }
      /* pack this block's transform type into the per-MB word */
3296  if (ttmb_out)
3297  *ttmb_out |= ttblk << (n * 4);
3298  return pat;
3299 }
3300  // Macroblock group
3302 
/* LUTs used by the P-MB decoders below; presumably the size/offset
 * tables for MVDATA differential decoding (see GET_MVDATA) — TODO confirm
 * against the macro definition, which is outside this extract. */
3303 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3304 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3305 
3307 {
      /* Vertical (horizontal-edge) in-loop deblocking for one block of a
       * P-frame MB.  Works on the MB row above the current one
       * (note the `s->mb_x - s->mb_stride` indexing); filters the edge
       * against the block below, choosing 8- or 4-pel filtering from the
       * combined coded-block pattern, and skips filtering entirely when
       * both sides are inter with identical motion vectors.
       * NOTE(review): the function signature line is elided from this
       * doxygen extract; by its body this is the per-block vertical loop
       * filter (vc1_apply_p_v_loop_filter) — confirm against the repo. */
3308  MpegEncContext *s = &v->s;
3309  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3310  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3311  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3312  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3313  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3314  uint8_t *dst;
3315 
3316  if (block_num > 3) {
3317  dst = s->dest[block_num - 3];
3318  } else {
3319  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3320  }
      /* filter against the block below unless we are on the last MB row
       * (luma blocks 0/1 still have a neighbour inside the same MB) */
3321  if (s->mb_y != s->end_mb_y || block_num < 2) {
3322  int16_t (*mv)[2];
3323  int mv_stride;
3324 
3325  if (block_num > 3) {
3326  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3327  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3328  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3329  mv_stride = s->mb_stride;
3330  } else {
3331  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3332  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3333  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3334  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3335  mv_stride = s->b8_stride;
3336  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3337  }
3338 
      /* intra on either side, or differing MVs => always filter 8 pels */
3339  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3340  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3341  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3342  } else {
3343  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3344  if (idx == 3) {
3345  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3346  } else if (idx) {
3347  if (idx == 1)
3348  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3349  else
3350  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3351  }
3352  }
3353  }
3354 
      /* also filter the internal subblock edge when the block used a
       * transform with a horizontal split (4x4 or 8x4) */
3355  dst -= 4 * linesize;
3356  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3357  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3358  idx = (block_cbp | (block_cbp >> 2)) & 3;
3359  if (idx == 3) {
3360  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3361  } else if (idx) {
3362  if (idx == 1)
3363  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3364  else
3365  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3366  }
3367  }
3368 }
3369 
3371 {
      /* Horizontal (vertical-edge) in-loop deblocking for one block of a
       * P-frame MB.  Mirrors the vertical filter above but works one MB
       * further behind (`s->mb_x - 1 - s->mb_stride`), filtering the edge
       * against the block to the right.
       * NOTE(review): signature line elided from this doxygen extract;
       * by its body this is vc1_apply_p_h_loop_filter — confirm in repo. */
3372  MpegEncContext *s = &v->s;
3373  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3374  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3375  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3376  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3377  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3378  uint8_t *dst;
3379 
3380  if (block_num > 3) {
3381  dst = s->dest[block_num - 3] - 8 * linesize;
3382  } else {
3383  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3384  }
3385 
      /* filter against the right neighbour unless at the right picture
       * edge (blocks 0 and 2 still have a neighbour inside the MB) */
3386  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3387  int16_t (*mv)[2];
3388 
3389  if (block_num > 3) {
3390  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3391  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3392  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3393  } else {
3394  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3395  : (mb_cbp >> ((block_num + 1) * 4));
3396  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3397  : (mb_is_intra >> ((block_num + 1) * 4));
3398  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3399  }
      /* intra on either side, or differing MVs => always filter 8 pels */
3400  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3401  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3402  } else {
3403  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3404  if (idx == 5) {
3405  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3406  } else if (idx) {
3407  if (idx == 1)
3408  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3409  else
3410  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3411  }
3412  }
3413  }
3414 
      /* internal subblock edge for transforms with a vertical split */
3415  dst -= 4;
3416  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3417  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3418  idx = (block_cbp | (block_cbp >> 1)) & 5;
3419  if (idx == 5) {
3420  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3421  } else if (idx) {
3422  if (idx == 1)
3423  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3424  else
3425  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3426  }
3427  }
3428 }
3429 
3431 {
      /* Drive the per-block V and H loop filters over all 6 blocks of the
       * current MB position.
       * NOTE(review): this doxygen extract elides the signature line and
       * the hyperlinked per-block filter calls (lines 3436, 3443/3444,
       * 3447, 3449/3450) — the loop bodies below call the V/H per-block
       * filter helpers defined above. */
3432  MpegEncContext *s = &v->s;
3433  int i;
3434 
3435  for (i = 0; i < 6; i++) {
3437  }
3438 
3439  /* V always precedes H, therefore we run H one MB before V;
3440  * at the end of a row, we catch up to complete the row */
3441  if (s->mb_x) {
3442  for (i = 0; i < 6; i++) {
3444  }
3445  if (s->mb_x == s->mb_width - 1) {
3446  s->mb_x++;
3448  for (i = 0; i < 6; i++) {
3450  }
3451  }
3452  }
3453 }
3454 
3458 {
      /* Decode one macroblock of a progressive P frame: reads the
       * 1MV/4MV and skip flags (raw or from the decoded bitplanes),
       * performs motion-vector prediction and motion compensation, then
       * decodes intra blocks (vc1_decode_intra_block) or inter residuals
       * (vc1_decode_p_block) per block, and records the MB's cbp /
       * transform-type / intra masks for the loop filter.
       * NOTE(review): doxygen-extracted listing — the signature line and
       * a few hyperlinked lines (3495, 3628) are elided from this view. */
3459  MpegEncContext *s = &v->s;
3460  GetBitContext *gb = &s->gb;
3461  int i, j;
3462  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3463  int cbp; /* cbp decoding stuff */
3464  int mqdiff, mquant; /* MB quantization */
3465  int ttmb = v->ttfrm; /* MB Transform type */
3466 
3467  int mb_has_coeffs = 1; /* last_flag */
3468  int dmv_x, dmv_y; /* Differential MV components */
3469  int index, index1; /* LUT indexes */
3470  int val, sign; /* temp values */
3471  int first_block = 1;
3472  int dst_idx, off;
3473  int skipped, fourmv;
3474  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3475 
3476  mquant = v->pq; /* lossy initialization */
3477 
      /* MVTYPEMB and SKIPMB may be raw bits or precomputed bitplanes */
3478  if (v->mv_type_is_raw)
3479  fourmv = get_bits1(gb);
3480  else
3481  fourmv = v->mv_type_mb_plane[mb_pos];
3482  if (v->skip_is_raw)
3483  skipped = get_bits1(gb);
3484  else
3485  skipped = v->s.mbskip_table[mb_pos];
3486 
3487  if (!fourmv) { /* 1MV mode */
3488  if (!skipped) {
3489  GET_MVDATA(dmv_x, dmv_y);
3490 
3491  if (s->mb_intra) {
3492  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3493  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3494  }
      /* (a line setting MB type is elided from this extract here) */
3496  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3497 
3498  /* FIXME Set DC val for inter block ? */
3499  if (s->mb_intra && !mb_has_coeffs) {
3500  GET_MQUANT();
3501  s->ac_pred = get_bits1(gb);
3502  cbp = 0;
3503  } else if (mb_has_coeffs) {
3504  if (s->mb_intra)
3505  s->ac_pred = get_bits1(gb);
3506  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3507  GET_MQUANT();
3508  } else {
3509  mquant = v->pq;
3510  cbp = 0;
3511  }
3512  s->current_picture.f.qscale_table[mb_pos] = mquant;
3513 
3514  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3515  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3516  VC1_TTMB_VLC_BITS, 2);
3517  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3518  dst_idx = 0;
3519  for (i = 0; i < 6; i++) {
3520  s->dc_val[0][s->block_index[i]] = 0;
3521  dst_idx += i >> 2;
3522  val = ((cbp >> (5 - i)) & 1);
3523  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3524  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3525  if (s->mb_intra) {
3526  /* check if prediction blocks A and C are available */
3527  v->a_avail = v->c_avail = 0;
3528  if (i == 2 || i == 3 || !s->first_slice_line)
3529  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3530  if (i == 1 || i == 3 || s->mb_x)
3531  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3532 
3533  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3534  (i & 4) ? v->codingset2 : v->codingset);
3535  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3536  continue;
3537  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
      /* range reduction doubles the residual amplitude */
3538  if (v->rangeredfrm)
3539  for (j = 0; j < 64; j++)
3540  s->block[i][j] <<= 1;
3541  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3542  if (v->pq >= 9 && v->overlap) {
3543  if (v->c_avail)
3544  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3545  if (v->a_avail)
3546  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3547  }
3548  block_cbp |= 0xF << (i << 2);
3549  block_intra |= 1 << i;
3550  } else if (val) {
3551  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3552  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3553  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3554  block_cbp |= pat << (i << 2);
3555  if (!v->ttmbf && ttmb < 8)
3556  ttmb = -1;
3557  first_block = 0;
3558  }
3559  }
3560  } else { // skipped
3561  s->mb_intra = 0;
3562  for (i = 0; i < 6; i++) {
3563  v->mb_type[0][s->block_index[i]] = 0;
3564  s->dc_val[0][s->block_index[i]] = 0;
3565  }
3566  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3567  s->current_picture.f.qscale_table[mb_pos] = 0;
3568  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3569  vc1_mc_1mv(v, 0);
3570  }
3571  } else { // 4MV mode
3572  if (!skipped /* unskipped MB */) {
3573  int intra_count = 0, coded_inter = 0;
3574  int is_intra[6], is_coded[6];
3575  /* Get CBPCY */
3576  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3577  for (i = 0; i < 6; i++) {
3578  val = ((cbp >> (5 - i)) & 1);
3579  s->dc_val[0][s->block_index[i]] = 0;
3580  s->mb_intra = 0;
3581  if (i < 4) {
3582  dmv_x = dmv_y = 0;
3583  s->mb_intra = 0;
3584  mb_has_coeffs = 0;
3585  if (val) {
3586  GET_MVDATA(dmv_x, dmv_y);
3587  }
3588  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3589  if (!s->mb_intra)
3590  vc1_mc_4mv_luma(v, i, 0);
3591  intra_count += s->mb_intra;
3592  is_intra[i] = s->mb_intra;
3593  is_coded[i] = mb_has_coeffs;
3594  }
      /* chroma blocks are intra iff at least 3 luma blocks are */
3595  if (i & 4) {
3596  is_intra[i] = (intra_count >= 3);
3597  is_coded[i] = val;
3598  }
3599  if (i == 4)
3600  vc1_mc_4mv_chroma(v, 0);
3601  v->mb_type[0][s->block_index[i]] = is_intra[i];
3602  if (!coded_inter)
3603  coded_inter = !is_intra[i] & is_coded[i];
3604  }
3605  // if there are no coded blocks then don't do anything more
3606  dst_idx = 0;
3607  if (!intra_count && !coded_inter)
3608  goto end;
3609  GET_MQUANT();
3610  s->current_picture.f.qscale_table[mb_pos] = mquant;
3611  /* test if block is intra and has pred */
3612  {
3613  int intrapred = 0;
3614  for (i = 0; i < 6; i++)
3615  if (is_intra[i]) {
3616  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3617  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3618  intrapred = 1;
3619  break;
3620  }
3621  }
3622  if (intrapred)
3623  s->ac_pred = get_bits1(gb);
3624  else
3625  s->ac_pred = 0;
3626  }
      /* NOTE(review): the get_vlc2() read of ttmb (original line 3628)
       * is elided from this extract — it is the body of this `if`. */
3627  if (!v->ttmbf && coded_inter)
3629  for (i = 0; i < 6; i++) {
3630  dst_idx += i >> 2;
3631  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3632  s->mb_intra = is_intra[i];
3633  if (is_intra[i]) {
3634  /* check if prediction blocks A and C are available */
3635  v->a_avail = v->c_avail = 0;
3636  if (i == 2 || i == 3 || !s->first_slice_line)
3637  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3638  if (i == 1 || i == 3 || s->mb_x)
3639  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3640 
3641  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3642  (i & 4) ? v->codingset2 : v->codingset);
3643  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3644  continue;
3645  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3646  if (v->rangeredfrm)
3647  for (j = 0; j < 64; j++)
3648  s->block[i][j] <<= 1;
3649  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3650  (i & 4) ? s->uvlinesize : s->linesize);
3651  if (v->pq >= 9 && v->overlap) {
3652  if (v->c_avail)
3653  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3654  if (v->a_avail)
3655  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3656  }
3657  block_cbp |= 0xF << (i << 2);
3658  block_intra |= 1 << i;
3659  } else if (is_coded[i]) {
3660  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3661  first_block, s->dest[dst_idx] + off,
3662  (i & 4) ? s->uvlinesize : s->linesize,
3663  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3664  &block_tt);
3665  block_cbp |= pat << (i << 2);
3666  if (!v->ttmbf && ttmb < 8)
3667  ttmb = -1;
3668  first_block = 0;
3669  }
3670  }
3671  } else { // skipped MB
3672  s->mb_intra = 0;
3673  s->current_picture.f.qscale_table[mb_pos] = 0;
3674  for (i = 0; i < 6; i++) {
3675  v->mb_type[0][s->block_index[i]] = 0;
3676  s->dc_val[0][s->block_index[i]] = 0;
3677  }
3678  for (i = 0; i < 4; i++) {
3679  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3680  vc1_mc_4mv_luma(v, i, 0);
3681  }
3682  vc1_mc_4mv_chroma(v, 0);
3683  s->current_picture.f.qscale_table[mb_pos] = 0;
3684  }
3685  }
3686 end:
      /* record per-MB masks consumed by the loop filter pass */
3687  v->cbp[s->mb_x] = block_cbp;
3688  v->ttblk[s->mb_x] = block_tt;
3689  v->is_intra[s->mb_x] = block_intra;
3690 
3691  return 0;
3692 }
3693 
3694 /* Decode one macroblock in an interlaced frame p picture */
3695 
3697 {
      /* Interlaced-frame P-MB decoder: reads the MB mode VLC (4MV-switch
       * dependent), dispatches to intra / 1MV / 2MV-field / 4MV decoding,
       * sets per-block MV types, performs MC, then decodes residuals with
       * vc1_decode_p_block.
       * NOTE(review): doxygen-extracted listing — the signature line and
       * several hyperlinked lines are elided (the MV_PMODE_INTFR_4MV_FIELD
       * and _2MV_FIELD case labels at 3736/3743, the twomvbp/fourmvbp VLC
       * reads at 3806/3810, and the ttmb VLC read at 3863). */
3698  MpegEncContext *s = &v->s;
3699  GetBitContext *gb = &s->gb;
3700  int i;
3701  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3702  int cbp = 0; /* cbp decoding stuff */
3703  int mqdiff, mquant; /* MB quantization */
3704  int ttmb = v->ttfrm; /* MB Transform type */
3705 
3706  int mb_has_coeffs = 1; /* last_flag */
3707  int dmv_x, dmv_y; /* Differential MV components */
3708  int val; /* temp value */
3709  int first_block = 1;
3710  int dst_idx, off;
3711  int skipped, fourmv = 0, twomv = 0;
3712  int block_cbp = 0, pat, block_tt = 0;
3713  int idx_mbmode = 0, mvbp;
3714  int stride_y, fieldtx;
3715 
3716  mquant = v->pq; /* Loosy initialization */
3717 
3718  if (v->skip_is_raw)
3719  skipped = get_bits1(gb);
3720  else
3721  skipped = v->s.mbskip_table[mb_pos];
3722  if (!skipped) {
3723  if (v->fourmvswitch)
3724  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3725  else
3726  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3727  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3728  /* store the motion vector type in a flag (useful later) */
3729  case MV_PMODE_INTFR_4MV:
3730  fourmv = 1;
3731  v->blk_mv_type[s->block_index[0]] = 0;
3732  v->blk_mv_type[s->block_index[1]] = 0;
3733  v->blk_mv_type[s->block_index[2]] = 0;
3734  v->blk_mv_type[s->block_index[3]] = 0;
3735  break;
      /* (case MV_PMODE_INTFR_4MV_FIELD: — label elided from extract) */
3737  fourmv = 1;
3738  v->blk_mv_type[s->block_index[0]] = 1;
3739  v->blk_mv_type[s->block_index[1]] = 1;
3740  v->blk_mv_type[s->block_index[2]] = 1;
3741  v->blk_mv_type[s->block_index[3]] = 1;
3742  break;
      /* (case MV_PMODE_INTFR_2MV_FIELD: — label elided from extract) */
3744  twomv = 1;
3745  v->blk_mv_type[s->block_index[0]] = 1;
3746  v->blk_mv_type[s->block_index[1]] = 1;
3747  v->blk_mv_type[s->block_index[2]] = 1;
3748  v->blk_mv_type[s->block_index[3]] = 1;
3749  break;
3750  case MV_PMODE_INTFR_1MV:
3751  v->blk_mv_type[s->block_index[0]] = 0;
3752  v->blk_mv_type[s->block_index[1]] = 0;
3753  v->blk_mv_type[s->block_index[2]] = 0;
3754  v->blk_mv_type[s->block_index[3]] = 0;
3755  break;
3756  }
3757  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3758  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3759  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3760  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3761  s->mb_intra = v->is_intra[s->mb_x] = 1;
3762  for (i = 0; i < 6; i++)
3763  v->mb_type[0][s->block_index[i]] = 1;
3764  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3765  mb_has_coeffs = get_bits1(gb);
3766  if (mb_has_coeffs)
3767  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3768  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3769  GET_MQUANT();
3770  s->current_picture.f.qscale_table[mb_pos] = mquant;
3771  /* Set DC scale - y and c use the same (not sure if necessary here) */
3772  s->y_dc_scale = s->y_dc_scale_table[mquant];
3773  s->c_dc_scale = s->c_dc_scale_table[mquant];
3774  dst_idx = 0;
3775  for (i = 0; i < 6; i++) {
3776  s->dc_val[0][s->block_index[i]] = 0;
3777  dst_idx += i >> 2;
3778  val = ((cbp >> (5 - i)) & 1);
3779  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3780  v->a_avail = v->c_avail = 0;
3781  if (i == 2 || i == 3 || !s->first_slice_line)
3782  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3783  if (i == 1 || i == 3 || s->mb_x)
3784  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3785 
3786  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3787  (i & 4) ? v->codingset2 : v->codingset);
3788  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3789  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
      /* field transform: luma rows are interleaved, so double the stride */
3790  if (i < 4) {
3791  stride_y = s->linesize << fieldtx;
3792  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3793  } else {
3794  stride_y = s->uvlinesize;
3795  off = 0;
3796  }
3797  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3798  //TODO: loop filter
3799  }
3800 
3801  } else { // inter MB
3802  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3803  if (mb_has_coeffs)
3804  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
      /* read the 2MV/4MV block pattern (VLC reads elided from extract) */
3805  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3807  } else {
3808  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3809  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3811  }
3812  }
3813  s->mb_intra = v->is_intra[s->mb_x] = 0;
3814  for (i = 0; i < 6; i++)
3815  v->mb_type[0][s->block_index[i]] = 0;
3816  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3817  /* for all motion vector read MVDATA and motion compensate each block */
3818  dst_idx = 0;
3819  if (fourmv) {
3820  mvbp = v->fourmvbp;
3821  for (i = 0; i < 6; i++) {
3822  if (i < 4) {
3823  dmv_x = dmv_y = 0;
3824  val = ((mvbp >> (3 - i)) & 1);
3825  if (val) {
3826  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3827  }
3828  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3829  vc1_mc_4mv_luma(v, i, 0);
3830  } else if (i == 4) {
3831  vc1_mc_4mv_chroma4(v);
3832  }
3833  }
3834  } else if (twomv) {
      /* two field MVs: one for the top pair, one for the bottom pair */
3835  mvbp = v->twomvbp;
3836  dmv_x = dmv_y = 0;
3837  if (mvbp & 2) {
3838  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3839  }
3840  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3841  vc1_mc_4mv_luma(v, 0, 0);
3842  vc1_mc_4mv_luma(v, 1, 0);
3843  dmv_x = dmv_y = 0;
3844  if (mvbp & 1) {
3845  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3846  }
3847  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3848  vc1_mc_4mv_luma(v, 2, 0);
3849  vc1_mc_4mv_luma(v, 3, 0);
3850  vc1_mc_4mv_chroma4(v);
3851  } else {
3852  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3853  if (mvbp) {
3854  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3855  }
3856  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3857  vc1_mc_1mv(v, 0);
3858  }
3859  if (cbp)
3860  GET_MQUANT(); // p. 227
3861  s->current_picture.f.qscale_table[mb_pos] = mquant;
      /* NOTE(review): the ttmb get_vlc2() read (original line 3863) is
       * elided from this extract — it is the body of this `if`. */
3862  if (!v->ttmbf && cbp)
3864  for (i = 0; i < 6; i++) {
3865  s->dc_val[0][s->block_index[i]] = 0;
3866  dst_idx += i >> 2;
3867  val = ((cbp >> (5 - i)) & 1);
3868  if (!fieldtx)
3869  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3870  else
3871  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3872  if (val) {
3873  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3874  first_block, s->dest[dst_idx] + off,
3875  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3876  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3877  block_cbp |= pat << (i << 2);
3878  if (!v->ttmbf && ttmb < 8)
3879  ttmb = -1;
3880  first_block = 0;
3881  }
3882  }
3883  }
3884  } else { // skipped
3885  s->mb_intra = v->is_intra[s->mb_x] = 0;
3886  for (i = 0; i < 6; i++) {
3887  v->mb_type[0][s->block_index[i]] = 0;
3888  s->dc_val[0][s->block_index[i]] = 0;
3889  }
3890  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3891  s->current_picture.f.qscale_table[mb_pos] = 0;
3892  v->blk_mv_type[s->block_index[0]] = 0;
3893  v->blk_mv_type[s->block_index[1]] = 0;
3894  v->blk_mv_type[s->block_index[2]] = 0;
3895  v->blk_mv_type[s->block_index[3]] = 0;
3896  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3897  vc1_mc_1mv(v, 0);
3898  }
      /* at end of row, publish the intra flags for the next row's
       * prediction (is_intra is a sliding per-row buffer) */
3899  if (s->mb_x == s->mb_width - 1)
3900  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3901  return 0;
3902 }
3903 
/* NOTE(review): doxygen-extracted listing -- the function's signature line and
 * several hyperlinked statements were stripped by the extraction.  From the
 * body this is presumably the interlaced-field P-frame macroblock decoder
 * (vc1_decode_p_mb_intfi) of libav's vc1dec.c -- confirm against the repo. */
3905 {
3906  MpegEncContext *s = &v->s;
3907  GetBitContext *gb = &s->gb;
3908  int i;
3909  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3910  int cbp = 0; /* cbp decoding stuff */
3911  int mqdiff, mquant; /* MB quantization */
3912  int ttmb = v->ttfrm; /* MB Transform type */
3913 
3914  int mb_has_coeffs = 1; /* last_flag */
3915  int dmv_x, dmv_y; /* Differential MV components */
3916  int val; /* temp values */
3917  int first_block = 1;
3918  int dst_idx, off;
3919  int pred_flag;
 /* NOTE(review): pred_flag is left uninitialized here but is passed to
  * vc1_pred_mv() in the 1-MV path below even when no MV data was parsed
  * (idx_mbmode & 1 == 0) -- looks like a use of an uninitialized value;
  * verify against the upstream fix which initializes it to 0. */
3920  int block_cbp = 0, pat, block_tt = 0;
3921  int idx_mbmode = 0;
3922 
3923  mquant = v->pq; /* Loosy initialization */
3924 
 /* MB mode VLC selects intra (<=1), 1-MV (<=5) or 4-MV coding. */
3925  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3926  if (idx_mbmode <= 1) { // intra MB
3927  s->mb_intra = v->is_intra[s->mb_x] = 1;
3928  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
3929  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
3930  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
3931  GET_MQUANT();
3932  s->current_picture.f.qscale_table[mb_pos] = mquant;
3933  /* Set DC scale - y and c use the same (not sure if necessary here) */
3934  s->y_dc_scale = s->y_dc_scale_table[mquant];
3935  s->c_dc_scale = s->c_dc_scale_table[mquant];
3936  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3937  mb_has_coeffs = idx_mbmode & 1;
3938  if (mb_has_coeffs)
3939  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
3940  dst_idx = 0;
 /* Decode the 4 luma + 2 chroma blocks of the intra MB. */
3941  for (i = 0; i < 6; i++) {
3942  s->dc_val[0][s->block_index[i]] = 0;
3943  v->mb_type[0][s->block_index[i]] = 1;
3944  dst_idx += i >> 2;
3945  val = ((cbp >> (5 - i)) & 1);
 /* A (above) / C (left) predictor availability for DC/AC prediction. */
3946  v->a_avail = v->c_avail = 0;
3947  if (i == 2 || i == 3 || !s->first_slice_line)
3948  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3949  if (i == 1 || i == 3 || s->mb_x)
3950  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3951 
3952  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3953  (i & 4) ? v->codingset2 : v->codingset);
3954  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3955  continue;
3956  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3957  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
 /* For the bottom field, shift the destination down by one line. */
3958  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
3959  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
3960  // TODO: loop filter
3961  }
3962  } else {
3963  s->mb_intra = v->is_intra[s->mb_x] = 0;
3964  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
3965  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
3966  if (idx_mbmode <= 5) { // 1-MV
3967  dmv_x = dmv_y = 0;
3968  if (idx_mbmode & 1) {
3969  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3970  }
 /* NOTE(review): pred_flag may be uninitialized on this call -- see above. */
3971  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3972  vc1_mc_1mv(v, 0);
3973  mb_has_coeffs = !(idx_mbmode & 2);
3974  } else { // 4-MV
 /* NOTE(review): a statement was stripped here by the extraction --
  * presumably the read of v->fourmvbp (4-MV block pattern VLC); confirm. */
3976  for (i = 0; i < 6; i++) {
3977  if (i < 4) {
3978  dmv_x = dmv_y = pred_flag = 0;
3979  val = ((v->fourmvbp >> (3 - i)) & 1);
3980  if (val) {
3981  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3982  }
3983  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3984  vc1_mc_4mv_luma(v, i, 0);
3985  } else if (i == 4)
3986  vc1_mc_4mv_chroma(v, 0);
3987  }
3988  mb_has_coeffs = idx_mbmode & 1;
3989  }
3990  if (mb_has_coeffs)
3991  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3992  if (cbp) {
3993  GET_MQUANT();
3994  }
3995  s->current_picture.f.qscale_table[mb_pos] = mquant;
3996  if (!v->ttmbf && cbp) {
 /* NOTE(review): stripped by extraction -- presumably the per-MB transform
  * type read (ttmb = get_vlc2(... ttmb_vlc ...)); confirm upstream. */
3998  }
3999  dst_idx = 0;
4000  for (i = 0; i < 6; i++) {
4001  s->dc_val[0][s->block_index[i]] = 0;
4002  dst_idx += i >> 2;
4003  val = ((cbp >> (5 - i)) & 1);
4004  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4005  if (v->cur_field_type)
4006  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4007  if (val) {
4008  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4009  first_block, s->dest[dst_idx] + off,
4010  (i & 4) ? s->uvlinesize : s->linesize,
4011  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4012  &block_tt);
4013  block_cbp |= pat << (i << 2);
4014  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4015  first_block = 0;
4016  }
4017  }
4018  }
 /* At end of row, roll the is_intra line buffer for the next MB row. */
4019  if (s->mb_x == s->mb_width - 1)
4020  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4021  return 0;
4022 }
4023 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_b_mb(), the progressive B-frame macroblock decoder. */
4027 {
4028  MpegEncContext *s = &v->s;
4029  GetBitContext *gb = &s->gb;
4030  int i, j;
4031  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4032  int cbp = 0; /* cbp decoding stuff */
4033  int mqdiff, mquant; /* MB quantization */
4034  int ttmb = v->ttfrm; /* MB Transform type */
4035  int mb_has_coeffs = 0; /* last_flag */
4036  int index, index1; /* LUT indexes */
4037  int val, sign; /* temp values */
4038  int first_block = 1;
4039  int dst_idx, off;
4040  int skipped, direct;
4041  int dmv_x[2], dmv_y[2];
4042  int bmvtype = BMV_TYPE_BACKWARD;
4043 
4044  mquant = v->pq; /* lossy initialization */
4045  s->mb_intra = 0;
4046 
 /* direct/skip flags come either raw from the bitstream or from the
  * pre-decoded bitplanes. */
4047  if (v->dmb_is_raw)
4048  direct = get_bits1(gb);
4049  else
4050  direct = v->direct_mb_plane[mb_pos];
4051  if (v->skip_is_raw)
4052  skipped = get_bits1(gb);
4053  else
4054  skipped = v->s.mbskip_table[mb_pos];
4055 
4056  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4057  for (i = 0; i < 6; i++) {
4058  v->mb_type[0][s->block_index[i]] = 0;
4059  s->dc_val[0][s->block_index[i]] = 0;
4060  }
4061  s->current_picture.f.qscale_table[mb_pos] = 0;
4062 
4063  if (!direct) {
4064  if (!skipped) {
 /* NOTE(review): GET_MVDATA is a macro; it presumably also sets
  * s->mb_intra and mb_has_coeffs as side effects -- confirm its
  * definition elsewhere in this file. */
4065  GET_MVDATA(dmv_x[0], dmv_y[0]);
4066  dmv_x[1] = dmv_x[0];
4067  dmv_y[1] = dmv_y[0];
4068  }
4069  if (skipped || !s->mb_intra) {
 /* resolve the B MV type (fwd/bwd depends on bfraction position) */
4070  bmvtype = decode012(gb);
4071  switch (bmvtype) {
4072  case 0:
4073  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4074  break;
4075  case 1:
4076  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4077  break;
4078  case 2:
4079  bmvtype = BMV_TYPE_INTERPOLATED;
4080  dmv_x[0] = dmv_y[0] = 0;
4081  }
4082  }
4083  }
4084  for (i = 0; i < 6; i++)
4085  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4086 
4087  if (skipped) {
4088  if (direct)
4089  bmvtype = BMV_TYPE_INTERPOLATED;
4090  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4091  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4092  return;
4093  }
4094  if (direct) {
4095  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4096  GET_MQUANT();
4097  s->mb_intra = 0;
4098  s->current_picture.f.qscale_table[mb_pos] = mquant;
4099  if (!v->ttmbf)
 /* NOTE(review): stripped by extraction -- presumably the ttmb VLC read
  * that forms the body of this 'if'; confirm upstream. */
4101  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4102  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4103  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4104  } else {
4105  if (!mb_has_coeffs && !s->mb_intra) {
4106  /* no coded blocks - effectively skipped */
4107  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4108  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4109  return;
4110  }
4111  if (s->mb_intra && !mb_has_coeffs) {
4112  GET_MQUANT();
4113  s->current_picture.f.qscale_table[mb_pos] = mquant;
4114  s->ac_pred = get_bits1(gb);
4115  cbp = 0;
4116  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4117  } else {
4118  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4119  GET_MVDATA(dmv_x[0], dmv_y[0]);
4120  if (!mb_has_coeffs) {
4121  /* interpolated skipped block */
4122  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4123  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4124  return;
4125  }
4126  }
4127  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4128  if (!s->mb_intra) {
4129  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4130  }
4131  if (s->mb_intra)
4132  s->ac_pred = get_bits1(gb);
4133  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4134  GET_MQUANT();
4135  s->current_picture.f.qscale_table[mb_pos] = mquant;
4136  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
 /* NOTE(review): stripped by extraction -- presumably the ttmb VLC read
  * that forms the body of this 'if'; confirm upstream. */
4138  }
4139  }
4140  dst_idx = 0;
 /* Residual decoding for the 6 blocks of the MB. */
4141  for (i = 0; i < 6; i++) {
4142  s->dc_val[0][s->block_index[i]] = 0;
4143  dst_idx += i >> 2;
4144  val = ((cbp >> (5 - i)) & 1);
4145  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4146  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4147  if (s->mb_intra) {
4148  /* check if prediction blocks A and C are available */
4149  v->a_avail = v->c_avail = 0;
4150  if (i == 2 || i == 3 || !s->first_slice_line)
4151  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4152  if (i == 1 || i == 3 || s->mb_x)
4153  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4154 
4155  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4156  (i & 4) ? v->codingset2 : v->codingset);
4157  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4158  continue;
4159  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4160  if (v->rangeredfrm)
4161  for (j = 0; j < 64; j++)
4162  s->block[i][j] <<= 1;
4163  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4164  } else if (val) {
4165  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4166  first_block, s->dest[dst_idx] + off,
4167  (i & 4) ? s->uvlinesize : s->linesize,
4168  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4169  if (!v->ttmbf && ttmb < 8)
4170  ttmb = -1;
4171  first_block = 0;
4172  }
4173  }
4174 }
4175 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_b_mb_intfi(), the interlaced-field B-frame MB decoder. */
4179 {
4180  MpegEncContext *s = &v->s;
4181  GetBitContext *gb = &s->gb;
4182  int i, j;
4183  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4184  int cbp = 0; /* cbp decoding stuff */
4185  int mqdiff, mquant; /* MB quantization */
4186  int ttmb = v->ttfrm; /* MB Transform type */
4187  int mb_has_coeffs = 0; /* last_flag */
4188  int val; /* temp value */
4189  int first_block = 1;
4190  int dst_idx, off;
4191  int fwd;
4192  int dmv_x[2], dmv_y[2], pred_flag[2];
4193  int bmvtype = BMV_TYPE_BACKWARD;
4194  int idx_mbmode, interpmvp;
4195 
4196  mquant = v->pq; /* Loosy initialization */
4197  s->mb_intra = 0;
4198 
4199  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4200  if (idx_mbmode <= 1) { // intra MB
4201  s->mb_intra = v->is_intra[s->mb_x] = 1;
4202  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4203  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4204  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4205  GET_MQUANT();
4206  s->current_picture.f.qscale_table[mb_pos] = mquant;
4207  /* Set DC scale - y and c use the same (not sure if necessary here) */
4208  s->y_dc_scale = s->y_dc_scale_table[mquant];
4209  s->c_dc_scale = s->c_dc_scale_table[mquant];
4210  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4211  mb_has_coeffs = idx_mbmode & 1;
4212  if (mb_has_coeffs)
4213  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4214  dst_idx = 0;
4215  for (i = 0; i < 6; i++) {
4216  s->dc_val[0][s->block_index[i]] = 0;
4217  dst_idx += i >> 2;
4218  val = ((cbp >> (5 - i)) & 1);
4219  v->mb_type[0][s->block_index[i]] = s->mb_intra;
 /* A/C predictor availability for DC/AC prediction. */
4220  v->a_avail = v->c_avail = 0;
4221  if (i == 2 || i == 3 || !s->first_slice_line)
4222  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4223  if (i == 1 || i == 3 || s->mb_x)
4224  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4225 
4226  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4227  (i & 4) ? v->codingset2 : v->codingset);
4228  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4229  continue;
4230  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4231  if (v->rangeredfrm)
4232  for (j = 0; j < 64; j++)
4233  s->block[i][j] <<= 1;
4234  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4235  off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4236  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4237  // TODO: yet to perform loop filter
4238  }
4239  } else {
4240  s->mb_intra = v->is_intra[s->mb_x] = 0;
4241  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4242  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4243  if (v->fmb_is_raw)
4244  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4245  else
4246  fwd = v->forward_mb_plane[mb_pos];
4247  if (idx_mbmode <= 5) { // 1-MV
4248  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4249  pred_flag[0] = pred_flag[1] = 0;
4250  if (fwd)
4251  bmvtype = BMV_TYPE_FORWARD;
4252  else {
4253  bmvtype = decode012(gb);
4254  switch (bmvtype) {
4255  case 0:
4256  bmvtype = BMV_TYPE_BACKWARD;
4257  break;
4258  case 1:
4259  bmvtype = BMV_TYPE_DIRECT;
4260  break;
4261  case 2:
4262  bmvtype = BMV_TYPE_INTERPOLATED;
4263  interpmvp = get_bits1(gb);
4264  }
4265  }
4266  v->bmvtype = bmvtype;
4267  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4268  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4269  }
 /* interpmvp is only set on the INTERPOLATED path above; the short-circuit
  * here keeps its read guarded by that same condition. */
4270  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4271  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4272  }
4273  if (bmvtype == BMV_TYPE_DIRECT) {
4274  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
 /* NOTE(review): pred_flag[0] is zeroed twice while pred_flag[1] is not
  * touched here -- looks like a typo for pred_flag[1]; behaviourally it
  * is covered by the initialization at 4249, but verify upstream. */
4275  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4276  }
4277  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4278  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4279  mb_has_coeffs = !(idx_mbmode & 2);
4280  } else { // 4-MV
4281  if (fwd)
4282  bmvtype = BMV_TYPE_FORWARD;
4283  v->bmvtype = bmvtype;
 /* NOTE(review): a statement was stripped here by the extraction --
  * presumably the read of v->fourmvbp (4-MV block pattern VLC); confirm. */
4285  for (i = 0; i < 6; i++) {
4286  if (i < 4) {
4287  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4288  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4289  val = ((v->fourmvbp >> (3 - i)) & 1);
4290  if (val) {
4291  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4292  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4293  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4294  }
4295  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4296  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4297  } else if (i == 4)
4298  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4299  }
4300  mb_has_coeffs = idx_mbmode & 1;
4301  }
4302  if (mb_has_coeffs)
4303  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4304  if (cbp) {
4305  GET_MQUANT();
4306  }
4307  s->current_picture.f.qscale_table[mb_pos] = mquant;
4308  if (!v->ttmbf && cbp) {
 /* NOTE(review): stripped by extraction -- presumably the ttmb VLC read;
  * confirm upstream. */
4310  }
4311  dst_idx = 0;
4312  for (i = 0; i < 6; i++) {
4313  s->dc_val[0][s->block_index[i]] = 0;
4314  dst_idx += i >> 2;
4315  val = ((cbp >> (5 - i)) & 1);
4316  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4317  if (v->cur_field_type)
4318  off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4319  if (val) {
4320  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4321  first_block, s->dest[dst_idx] + off,
4322  (i & 4) ? s->uvlinesize : s->linesize,
4323  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4324  if (!v->ttmbf && ttmb < 8)
4325  ttmb = -1;
4326  first_block = 0;
4327  }
4328  }
4329  }
4330 }
4331 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_i_blocks(), the Simple/Main-profile I-frame decoder.  Several
 * hyperlinked statements (codingset assignments, block-index helpers, the
 * intra CBP VLC read) were also stripped; see inline notes. */
4335 {
4336  int k, j;
4337  MpegEncContext *s = &v->s;
4338  int cbp, val;
4339  uint8_t *coded_val;
4340  int mb_pos;
4341 
4342  /* select codingmode used for VLC tables selection */
4343  switch (v->y_ac_table_index) {
4344  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4346  break;
4347  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4349  break;
4350  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4352  break;
4353  }
4354 
4355  switch (v->c_ac_table_index) {
4356  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4358  break;
4359  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4361  break;
4362  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4364  break;
4365  }
4366 
4367  /* Set DC scale - y and c use the same */
4368  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4369  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4370 
4371  //do frame decode
4372  s->mb_x = s->mb_y = 0;
4373  s->mb_intra = 1;
4374  s->first_slice_line = 1;
4375  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4376  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4378  for (; s->mb_x < s->mb_width; s->mb_x++) {
4379  uint8_t *dst[6];
 /* NOTE(review): stripped -- presumably ff_update_block_index(s). */
4381  dst[0] = s->dest[0];
4382  dst[1] = dst[0] + 8;
4383  dst[2] = s->dest[0] + s->linesize * 8;
4384  dst[3] = dst[2] + 8;
4385  dst[4] = s->dest[1];
4386  dst[5] = s->dest[2];
4387  s->dsp.clear_blocks(s->block[0]);
 /* NOTE(review): mb_width (not mb_stride) is used here, unlike the other
  * decoders in this file -- presumably intentional for this legacy path,
  * but verify against upstream. */
4388  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4389  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4390  s->current_picture.f.qscale_table[mb_pos] = v->pq;
4391  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4392  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4393 
4394  // do actual MB decoding and displaying
 /* NOTE(review): stripped -- presumably the intra CBP VLC read
  * (cbp = get_vlc2(... ff_msmp4_mb_i_vlc ...)); without it cbp would be
  * read uninitialized below, so confirm against upstream. */
4396  v->s.ac_pred = get_bits1(&v->s.gb);
4397 
4398  for (k = 0; k < 6; k++) {
4399  val = ((cbp >> (5 - k)) & 1);
4400 
4401  if (k < 4) {
4402  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4403  val = val ^ pred;
4404  *coded_val = val;
4405  }
4406  cbp |= val << (5 - k);
4407 
4408  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4409 
4410  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4411  continue;
4412  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4413  if (v->pq >= 9 && v->overlap) {
4414  if (v->rangeredfrm)
4415  for (j = 0; j < 64; j++)
4416  s->block[k][j] <<= 1;
4417  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4418  } else {
4419  if (v->rangeredfrm)
4420  for (j = 0; j < 64; j++)
4421  s->block[k][j] = (s->block[k][j] - 64) << 1;
4422  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4423  }
4424  }
4425 
 /* Overlap smoothing across block edges when enabled and pq >= 9. */
4426  if (v->pq >= 9 && v->overlap) {
4427  if (s->mb_x) {
4428  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4429  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4430  if (!(s->flags & CODEC_FLAG_GRAY)) {
4431  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4432  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4433  }
4434  }
4435  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4436  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4437  if (!s->first_slice_line) {
4438  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4439  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4440  if (!(s->flags & CODEC_FLAG_GRAY)) {
4441  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4442  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4443  }
4444  }
4445  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4446  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4447  }
4448  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4449 
4450  if (get_bits_count(&s->gb) > v->bits) {
4451  ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4452  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4453  get_bits_count(&s->gb), v->bits);
4454  return;
4455  }
4456  }
4457  if (!v->s.loop_filter)
4458  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4459  else if (s->mb_y)
4460  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4461 
4462  s->first_slice_line = 0;
4463  }
4464  if (v->s.loop_filter)
4465  ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4466  ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4467 }
4468 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_i_blocks_adv(), the Advanced-profile I-frame decoder.  Many
 * hyperlinked statements (codingset assignments, block-index helpers, the
 * CBP read, inverse transform / put / overlap / loop-filter calls) were
 * also stripped; see inline notes. */
4472 {
4473  int k;
4474  MpegEncContext *s = &v->s;
4475  int cbp, val;
4476  uint8_t *coded_val;
4477  int mb_pos;
4478  int mquant = v->pq;
4479  int mqdiff;
4480  GetBitContext *gb = &s->gb;
4481 
4482  /* select codingmode used for VLC tables selection */
4483  switch (v->y_ac_table_index) {
4484  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4486  break;
4487  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4489  break;
4490  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4492  break;
4493  }
4494 
4495  switch (v->c_ac_table_index) {
4496  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4498  break;
4499  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4501  break;
4502  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4504  break;
4505  }
4506 
4507  // do frame decode
4508  s->mb_x = s->mb_y = 0;
4509  s->mb_intra = 1;
4510  s->first_slice_line = 1;
4511  s->mb_y = s->start_mb_y;
4512  if (s->start_mb_y) {
4513  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4515  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4516  (1 + s->b8_stride) * sizeof(*s->coded_block));
4517  }
4518  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4519  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4521  for (;s->mb_x < s->mb_width; s->mb_x++) {
4522  DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
 /* NOTE(review): stripped -- presumably ff_update_block_index(s). */
4524  s->dsp.clear_blocks(block[0]);
4525  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4526  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4527  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4528  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4529 
4530  // do actual MB decoding and displaying
4531  if (v->fieldtx_is_raw)
4532  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
 /* NOTE(review): stripped -- presumably the intra CBP VLC read; cbp would
  * otherwise be read uninitialized below.  Confirm against upstream. */
4534  if ( v->acpred_is_raw)
4535  v->s.ac_pred = get_bits1(&v->s.gb);
4536  else
4537  v->s.ac_pred = v->acpred_plane[mb_pos];
4538 
4539  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4540  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4541 
4542  GET_MQUANT();
4543 
4544  s->current_picture.f.qscale_table[mb_pos] = mquant;
4545  /* Set DC scale - y and c use the same */
4546  s->y_dc_scale = s->y_dc_scale_table[mquant];
4547  s->c_dc_scale = s->c_dc_scale_table[mquant];
4548 
4549  for (k = 0; k < 6; k++) {
4550  val = ((cbp >> (5 - k)) & 1);
4551 
4552  if (k < 4) {
4553  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4554  val = val ^ pred;
4555  *coded_val = val;
4556  }
4557  cbp |= val << (5 - k);
4558 
4559  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4560  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4561 
4562  vc1_decode_i_block_adv(v, block[k], k, val,
4563  (k < 4) ? v->codingset : v->codingset2, mquant);
4564 
4565  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4566  continue;
 /* NOTE(review): stripped -- presumably the inverse transform call for
  * this block; confirm against upstream. */
4568  }
4569 
 /* NOTE(review): several lines stripped here -- presumably the delayed
  * overlap smoothing / put-blocks / in-loop deblocking calls for the
  * previous MB; confirm against upstream. */
4574  if (get_bits_count(&s->gb) > v->bits) {
4575  // TODO: may need modification to handle slice coding
4576  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4577  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4578  get_bits_count(&s->gb), v->bits);
4579  return;
4580  }
4581  }
4582  if (!v->s.loop_filter)
4583  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4584  else if (s->mb_y)
4585  ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4586  s->first_slice_line = 0;
4587  }
4588 
4589  /* raw bottom MB row */
4590  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4592  for (;s->mb_x < s->mb_width; s->mb_x++) {
 /* NOTE(review): stripped -- presumably ff_update_block_index(s) and the
  * final put-blocks call for the last delayed MB; confirm upstream. */
4595  if (v->s.loop_filter)
 /* NOTE(review): stripped -- presumably the delayed loop-filter call. */
4597  }
4598  if (v->s.loop_filter)
4599  ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4600  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4601  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4602 }
4603 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_p_blocks(), the P-frame MB-loop driver. */
4605 {
4606  MpegEncContext *s = &v->s;
4607  int apply_loop_filter;
4608 
4609  /* select codingmode used for VLC tables selection */
 /* NOTE(review): both switches below test v->c_ac_table_index; the first one
  * presumably ought to select the luma coding set (y_ac_table_index) -- this
  * duplication also exists in historical upstream sources, so verify rather
  * than silently "fixing". */
4610  switch (v->c_ac_table_index) {
4611  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4613  break;
4614  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4616  break;
4617  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4619  break;
4620  }
4621 
4622  switch (v->c_ac_table_index) {
4623  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4625  break;
4626  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4628  break;
4629  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4631  break;
4632  }
4633 
4634  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4635  s->first_slice_line = 1;
4636  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4637  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4638  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4640  for (; s->mb_x < s->mb_width; s->mb_x++) {
 /* NOTE(review): stripped -- presumably ff_update_block_index(s). */
4642 
 /* Dispatch on the frame coding mode (field / frame interlaced /
  * progressive); the interlaced decoder calls were stripped. */
4643  if (v->fcm == ILACE_FIELD)
 /* NOTE(review): stripped -- presumably vc1_decode_p_mb_intfi(v). */
4645  else if (v->fcm == ILACE_FRAME)
 /* NOTE(review): stripped -- presumably vc1_decode_p_mb_intfr(v). */
4647  else vc1_decode_p_mb(v);
4648  if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
 /* NOTE(review): stripped -- presumably vc1_apply_p_loop_filter(v). */
4650  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4651  // TODO: may need modification to handle slice coding
4652  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4653  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4654  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4655  return;
4656  }
4657  }
 /* Roll the per-row prediction line buffers for the next MB row. */
4658  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4659  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4660  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4661  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4662  if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4663  s->first_slice_line = 0;
4664  }
4665  if (apply_loop_filter) {
4666  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4668  for (; s->mb_x < s->mb_width; s->mb_x++) {
 /* NOTE(review): stripped -- presumably ff_update_block_index(s) and
  * vc1_apply_p_loop_filter(v) for the last row. */
4671  }
4672  }
4673  if (s->end_mb_y >= s->start_mb_y)
4674  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4675  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4676  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4677 }
4678 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_b_blocks(), the B-frame MB-loop driver. */
4680 {
4681  MpegEncContext *s = &v->s;
4682 
4683  /* select codingmode used for VLC tables selection */
 /* NOTE(review): as in the P-frame driver, both switches test
  * v->c_ac_table_index; the first one presumably ought to use
  * y_ac_table_index -- verify against upstream before changing. */
4684  switch (v->c_ac_table_index) {
4685  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4687  break;
4688  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4690  break;
4691  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset assignment. */
4693  break;
4694  }
4695 
4696  switch (v->c_ac_table_index) {
4697  case 0:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4699  break;
4700  case 1:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4702  break;
4703  case 2:
 /* NOTE(review): case body stripped -- presumably a v->codingset2 assignment. */
4705  break;
4706  }
4707 
4708  s->first_slice_line = 1;
4709  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4710  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably ff_init_block_index(s). */
4712  for (; s->mb_x < s->mb_width; s->mb_x++) {
 /* NOTE(review): stripped -- presumably ff_update_block_index(s). */
4714 
4715  if (v->fcm == ILACE_FIELD)
 /* NOTE(review): stripped -- presumably vc1_decode_b_mb_intfi(v). */
4717  else
4718  vc1_decode_b_mb(v);
4719  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4720  // TODO: may need modification to handle slice coding
4721  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4722  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4723  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4724  return;
4725  }
4726  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4727  }
4728  if (!v->s.loop_filter)
4729  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4730  else if (s->mb_y)
4731  ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4732  s->first_slice_line = 0;
4733  }
4734  if (v->s.loop_filter)
4735  ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4736  ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4737  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4738 }
4739 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_skip_blocks(): a skipped P frame is reconstructed by copying
 * the previous picture row by row. */
4741 {
4742  MpegEncContext *s = &v->s;
4743 
 /* nothing to copy from -- bail out silently */
4744  if (!v->s.last_picture.f.data[0])
4745  return;
4746 
4747  ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4748  s->first_slice_line = 1;
4749  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4750  s->mb_x = 0;
 /* NOTE(review): stripped -- presumably the block-index init/update calls. */
4753  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4754  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4755  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4756  ff_draw_horiz_band(s, s->mb_y * 16, 16);
4757  s->first_slice_line = 0;
4758  }
 /* NOTE(review): stripped -- presumably s->pict_type = AV_PICTURE_TYPE_P. */
4760 }
4761 
/* NOTE(review): signature stripped by the doxygen extraction -- presumably
 * vc1_decode_blocks(), the per-picture dispatcher that selects the proper
 * MB-loop driver based on picture type and profile.  The hyperlinked call
 * lines inside the switch were stripped; see inline notes. */
4763 {
4764 
4765  v->s.esc3_level_length = 0;
4766  if (v->x8_type) {
 /* Intensity-compensated X8 intra frames go through the IntraX8 decoder. */
4767  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
4768  } else {
4769  v->cur_blk_idx = 0;
4770  v->left_blk_idx = -1;
4771  v->topleft_blk_idx = 1;
4772  v->top_blk_idx = 2;
4773  switch (v->s.pict_type) {
4774  case AV_PICTURE_TYPE_I:
4775  if (v->profile == PROFILE_ADVANCED)
 /* NOTE(review): stripped -- presumably vc1_decode_i_blocks_adv(v). */
4776  else
 /* NOTE(review): stripped -- presumably vc1_decode_i_blocks(v). */
4779  break;
4780  case AV_PICTURE_TYPE_P:
4781  if (v->p_frame_skipped)
 /* NOTE(review): stripped -- presumably vc1_decode_skip_blocks(v). */
4782  else
 /* NOTE(review): stripped -- presumably vc1_decode_p_blocks(v). */
4785  break;
4786  case AV_PICTURE_TYPE_B:
4787  if (v->bi_type) {
4788  if (v->profile == PROFILE_ADVANCED)
 /* NOTE(review): stripped -- presumably vc1_decode_i_blocks_adv(v). */
4789  else
 /* NOTE(review): stripped -- presumably vc1_decode_i_blocks(v). */
4792  } else
 /* NOTE(review): stripped -- presumably vc1_decode_b_blocks(v). */
4794  break;
4795  }
4796  }
4797 }
4798 
4799 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4800 
/* Parsed sprite (WMVP/WVP2) header data: two 7-coefficient affine-style
 * transforms plus the optional post-processing "effect" parameters.
 * (Doxygen field comments were stripped by the extraction.) */
4801 typedef struct {
 /* one transform per sprite: c[0],c[4] scale, c[1],c[3] rotation,
  * c[2],c[5] translation, c[6] opacity -- all 16.16 fixed point
  * (presumed from vc1_sprite_parse_transform; confirm). */
4813  int coefs[2][7];
4814 
4815  int effect_type, effect_flag;
4816  int effect_pcount1, effect_pcount2;  /* number of valid entries below */
4817  int effect_params1[15], effect_params2[10];
4818 } SpriteData;
4819 
4820 static inline int get_fp_val(GetBitContext* gb)
4821 {
4822  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
4823 }
4824 
4825 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4826 {
4827  c[1] = c[3] = 0;
4828 
4829  switch (get_bits(gb, 2)) {
4830  case 0:
4831  c[0] = 1 << 16;
4832  c[2] = get_fp_val(gb);
4833  c[4] = 1 << 16;
4834  break;
4835  case 1:
4836  c[0] = c[4] = get_fp_val(gb);
4837  c[2] = get_fp_val(gb);
4838  break;
4839  case 2:
4840  c[0] = get_fp_val(gb);
4841  c[2] = get_fp_val(gb);
4842  c[4] = get_fp_val(gb);
4843  break;
4844  case 3:
4845  c[0] = get_fp_val(gb);
4846  c[1] = get_fp_val(gb);
4847  c[2] = get_fp_val(gb);
4848  c[3] = get_fp_val(gb);
4849  c[4] = get_fp_val(gb);
4850  break;
4851  }
4852  c[5] = get_fp_val(gb);
4853  if (get_bits1(gb))
4854  c[6] = get_fp_val(gb);
4855  else
4856  c[6] = 1 << 16;
4857 }
4858 
4859 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4860 {
4861  AVCodecContext *avctx = v->s.avctx;
4862  int sprite, i;
4863 
4864  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4865  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4866  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4867  av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4868  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4869  for (i = 0; i < 7; i++)
4870  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4871  sd->coefs[sprite][i] / (1<<16),
4872  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4873  av_log(avctx, AV_LOG_DEBUG, "\n");
4874  }
4875 
4876  skip_bits(gb, 2);
4877  if (sd->effect_type = get_bits_long(gb, 30)) {
4878  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4879  case 7:
4880  vc1_sprite_parse_transform(gb, sd->effect_params1);
4881  break;
4882  case 14:
4883  vc1_sprite_parse_transform(gb, sd->effect_params1);
4884  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4885  break;
4886  default:
4887  for (i = 0; i < sd->effect_pcount1; i++)
4888  sd->effect_params1[i] = get_fp_val(gb);
4889  }
4890  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4891  // effect 13 is simple alpha blending and matches the opacity above
4892  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4893  for (i = 0; i < sd->effect_pcount1; i++)
4894  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4895  sd->effect_params1[i] / (1 << 16),
4896  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4897  av_log(avctx, AV_LOG_DEBUG, "\n");
4898  }
4899 
4900  sd->effect_pcount2 = get_bits(gb, 16);
4901  if (sd->effect_pcount2 > 10) {
4902  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
4903  return;
4904  } else if (sd->effect_pcount2) {
4905  i = -1;
4906  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
4907  while (++i < sd->effect_pcount2) {
4908  sd->effect_params2[i] = get_fp_val(gb);
4909  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4910  sd->effect_params2[i] / (1 << 16),
4911  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
4912  }
4913  av_log(avctx, AV_LOG_DEBUG, "\n");
4914  }
4915  }
4916  if (sd->effect_flag = get_bits1(gb))
4917  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
4918 
4919  if (get_bits_count(gb) >= gb->size_in_bits +
4920  (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
4921  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
4922  if (get_bits_count(gb) < gb->size_in_bits - 8)
4923  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
4924 }
4925 
/* Render the output frame from one or two sprites using the parsed 16.16
 * fixed-point transforms: rows are horizontally resampled into sr_rows
 * caches, then blended/interpolated vertically per output row.  Intricate
 * fixed-point walker -- annotated only, left byte-identical. */
4926 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
4927 {
4928  int i, plane, row, sprite;
 /* sr_cache[sprite][0/1]: which source line currently sits in
  * v->sr_rows[sprite][0/1]; -1 = empty. */
4929  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
4930  uint8_t* src_h[2][2];
4931  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
4932  int ysub[2];
4933  MpegEncContext *s = &v->s;
4934 
4935  for (i = 0; i < 2; i++) {
 /* note: '-' binds tighter than '<<', so this is (sprite_width-1) << 16 */
4936  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
4937  xadv[i] = sd->coefs[i][0];
4938  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
4939  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
4940 
4941  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
4942  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
4943  }
4944  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
4945 
4946  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
4947  int width = v->output_width>>!!plane;
4948 
4949  for (row = 0; row < v->output_height>>!!plane; row++) {
4950  uint8_t *dst = v->sprite_output_frame.data[plane] +
4951  v->sprite_output_frame.linesize[plane] * row;
4952 
4953  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
 /* sprite 0 comes from the current picture, sprite 1 from the last one */
4954  uint8_t *iplane = s->current_picture.f.data[plane];
4955  int iline = s->current_picture.f.linesize[plane];
4956  int ycoord = yoff[sprite] + yadv[sprite] * row;
4957  int yline = ycoord >> 16;
4958  ysub[sprite] = ycoord & 0xFFFF;
4959  if (sprite) {
4960  iplane = s->last_picture.f.data[plane];
4961  iline = s->last_picture.f.linesize[plane];
4962  }
 /* fast path: integer x offset and unit x step -- read rows in place */
4963  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
4964  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
4965  if (ysub[sprite])
4966  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
4967  } else {
 /* slow path: horizontally resample into the row cache, reusing
  * previously scaled rows when possible */
4968  if (sr_cache[sprite][0] != yline) {
4969  if (sr_cache[sprite][1] == yline) {
4970  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
4971  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
4972  } else {
4973  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
4974  sr_cache[sprite][0] = yline;
4975  }
4976  }
4977  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
4978  v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
4979  sr_cache[sprite][1] = yline + 1;
4980  }
4981  src_h[sprite][0] = v->sr_rows[sprite][0];
4982  src_h[sprite][1] = v->sr_rows[sprite][1];
4983  }
4984  }
4985 
 /* vertical interpolation / two-sprite blending into the output row */
4986  if (!v->two_sprites) {
4987  if (ysub[0]) {
4988  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
4989  } else {
4990  memcpy(dst, src_h[0][0], width);
4991  }
4992  } else {
4993  if (ysub[0] && ysub[1]) {
4994  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
4995  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
4996  } else if (ysub[0]) {
4997  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
4998  src_h[1][0], alpha, width);
4999  } else if (ysub[1]) {
5000  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5001  src_h[0][0], (1<<16)-1-alpha, width);
5002  } else {
5003  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5004  }
5005  }
5006  }
5007 
 /* halve the offsets when moving from luma to the subsampled chroma planes */
5008  if (!plane) {
5009  for (i = 0; i < 2; i++) {
5010  xoff[i] >>= 1;
5011  yoff[i] >>= 1;
5012  }
5013  }
5014 
5015  }
5016 }
5017 
5018 
/* Compose the output image for the WMV3IMAGE/VC1IMAGE sprite decoders.
 * Parses the per-frame sprite transform data, validates that the required
 * reference picture(s) are present, (re)acquires the output frame and
 * renders the sprite(s) into it.
 * Returns 0 on success, -1 on error.
 * NOTE(review): doc extraction dropped lines 5040-5041 here (presumably
 * the sprite_output_frame buffer-hints setup) — verify against the repo. */
5019 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5020 {
5021  MpegEncContext *s = &v->s;
5022  AVCodecContext *avctx = s->avctx;
5023  SpriteData sd;
5024 
     /* read the sprite warp coefficients for this frame into sd */
5025  vc1_parse_sprites(v, gb, &sd);
5026 
     /* the primary sprite comes from the current picture; bail out if absent */
5027  if (!s->current_picture.f.data[0]) {
5028  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5029  return -1;
5030  }
5031 
     /* two-sprite blending also needs the previous picture; if it is
      * missing, degrade gracefully to single-sprite rendering */
5032  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5033  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5034  v->two_sprites = 0;
5035  }
5036 
     /* release any previously composited frame before requesting a new one */
5037  if (v->sprite_output_frame.data[0])
5038  avctx->release_buffer(avctx, &v->sprite_output_frame);
5039 
5042  if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5043  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5044  return -1;
5045  }
5046 
5047  vc1_draw_sprites(v, &sd);
5048 
5049  return 0;
5050 }
5051 
5052 static void vc1_sprite_flush(AVCodecContext *avctx)
5053 {
5054  VC1Context *v = avctx->priv_data;
5055  MpegEncContext *s = &v->s;
5056  AVFrame *f = &s->current_picture.f;
5057  int plane, i;
5058 
5059  /* Windows Media Image codecs have a convergence interval of two keyframes.
5060  Since we can't enforce it, clear to black the missing sprite. This is
5061  wrong but it looks better than doing nothing. */
5062 
5063  if (f->data[0])
5064  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5065  for (i = 0; i < v->sprite_height>>!!plane; i++)
5066  memset(f->data[plane] + i * f->linesize[plane],
5067  plane ? 128 : 0, f->linesize[plane]);
5068 }
5069 
5070 #endif
5071 
5073 {
5074  MpegEncContext *s = &v->s;
5075  int i;
5076 
5077  /* Allocate mb bitplanes */
5082  v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5084 
5085  v->n_allocated_blks = s->mb_width + 2;
5086  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5087  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5088  v->cbp = v->cbp_base + s->mb_stride;
5089  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5090  v->ttblk = v->ttblk_base + s->mb_stride;
5091  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5092  v->is_intra = v->is_intra_base + s->mb_stride;
5093  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5094  v->luma_mv = v->luma_mv_base + s->mb_stride;
5095 
5096  /* allocate block type info in that way so it could be used with s->block_index[] */
5097  v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5098  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5099  v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5100  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5101 
5102  /* allocate memory to store block level MV info */
5103  v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5104  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5105  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5106  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5107  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5108  v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5109  v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5110  v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5111  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5112  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5113  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5114 
5115  /* Init coded blocks info */
5116  if (v->profile == PROFILE_ADVANCED) {
5117 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5118 // return -1;
5119 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5120 // return -1;
5121  }
5122 
5123  ff_intrax8_common_init(&v->x8,s);
5124 
5126  for (i = 0; i < 4; i++)
5127  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5128  }
5129 
5130  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5131  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5132  !v->mb_type_base) {
5135  av_freep(&v->acpred_plane);
5137  av_freep(&v->block);
5138  av_freep(&v->cbp_base);
5139  av_freep(&v->ttblk_base);
5140  av_freep(&v->is_intra_base);
5141  av_freep(&v->luma_mv_base);
5142  av_freep(&v->mb_type_base);
5143  return AVERROR(ENOMEM);
5144  }
5145 
5146  return 0;
5147 }
5148 
/* Decoder init callback: parses the sequence header (and entry point, for
 * advanced profile) from extradata, sets up pixel format, IDCT, scan
 * tables and sprite geometry.  Returns 0 on success, -1 on error.
 * NOTE(review): the signature line is missing from this extraction
 * (presumably "static av_cold int vc1_decode_init(AVCodecContext *avctx)"). */
5154 {
5155  VC1Context *v = avctx->priv_data;
5156  MpegEncContext *s = &v->s;
5157  GetBitContext gb;
5158  int i;
5159 
5160  /* save the container output size for WMImage */
5161  v->output_width = avctx->width;
5162  v->output_height = avctx->height;
5163 
     /* sequence header lives in extradata; without it we cannot decode */
5164  if (!avctx->extradata_size || !avctx->extradata)
5165  return -1;
5166  if (!(avctx->flags & CODEC_FLAG_GRAY))
5167  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5168  else
5169  avctx->pix_fmt = PIX_FMT_GRAY8;
5170  avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5171  v->s.avctx = avctx;
5172  avctx->flags |= CODEC_FLAG_EMU_EDGE;
5173  v->s.flags |= CODEC_FLAG_EMU_EDGE;
5174 
5175  if (avctx->idct_algo == FF_IDCT_AUTO) {
5176  avctx->idct_algo = FF_IDCT_WMV2;
5177  }
5178 
5179  if (ff_vc1_init_common(v) < 0)
5180  return -1;
5181  ff_vc1dsp_init(&v->vc1dsp);
5182 
5183  if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5184  int count = 0;
5185 
5186  // looks like WMV3 has a sequence header stored in the extradata
5187  // advanced sequence header may be before the first frame
5188  // the last byte of the extradata is a version number, 1 for the
5189  // samples we can decode
5190 
5191  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5192 
5193  if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
5194  return -1;
5195 
     /* report (but tolerate) leftover or over-read extradata bits */
5196  count = avctx->extradata_size*8 - get_bits_count(&gb);
5197  if (count > 0) {
5198  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5199  count, get_bits(&gb, count));
5200  } else if (count < 0) {
5201  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5202  }
5203  } else { // VC1/WVC1/WVP2
5204  const uint8_t *start = avctx->extradata;
5205  uint8_t *end = avctx->extradata + avctx->extradata_size;
5206  const uint8_t *next;
5207  int size, buf2_size;
5208  uint8_t *buf2 = NULL;
5209  int seq_initialized = 0, ep_initialized = 0;
5210 
5211  if (avctx->extradata_size < 16) {
5212  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5213  return -1;
5214  }
5215 
     /* NOTE(review): line 5216 — presumably the av_mallocz of buf2, the
      * unescape scratch buffer used below — appears dropped by extraction;
      * as shown, buf2 would be NULL when passed to vc1_unescape_buffer. */
5217  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5218  next = start;
     /* walk the marker-delimited chunks: unescape each payload and parse
      * sequence header / entry point chunks */
5219  for (; next < end; start = next) {
5220  next = find_next_marker(start + 4, end);
5221  size = next - start - 4;
5222  if (size <= 0)
5223  continue;
5224  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5225  init_get_bits(&gb, buf2, buf2_size * 8);
5226  switch (AV_RB32(start)) {
5227  case VC1_CODE_SEQHDR:
5228  if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5229  av_free(buf2);
5230  return -1;
5231  }
5232  seq_initialized = 1;
5233  break;
5234  case VC1_CODE_ENTRYPOINT:
5235  if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
5236  av_free(buf2);
5237  return -1;
5238  }
5239  ep_initialized = 1;
5240  break;
5241  }
5242  }
5243  av_free(buf2);
     /* advanced profile requires both a sequence header and an entry point */
5244  if (!seq_initialized || !ep_initialized) {
5245  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5246  return -1;
5247  }
5248  v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5249  }
5250 
5251  avctx->profile = v->profile;
5252  if (v->profile == PROFILE_ADVANCED)
5253  avctx->level = v->level;
5254 
5255  avctx->has_b_frames = !!avctx->max_b_frames;
5256 
5257  s->mb_width = (avctx->coded_width + 15) >> 4;
5258  s->mb_height = (avctx->coded_height + 15) >> 4;
5259 
     /* select zigzag scan tables; transposed variants for fast transform */
5260  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5261  for (i = 0; i < 64; i++) {
5262 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5263  v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5264  v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5265  v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5266  v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
     /* NOTE(review): line 5267 appears dropped by extraction here. */
5268  }
5269  v->left_blk_sh = 0;
5270  v->top_blk_sh = 3;
5271  } else {
5272  memcpy(v->zz_8x8, wmv1_scantable, 4*64);
5273  v->left_blk_sh = 3;
5274  v->top_blk_sh = 0;
5275  }
5276 
     /* sprite decoders: coded size describes the sprite, while the output
      * frame uses the container's dimensions saved above */
5277  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5278  v->sprite_width = avctx->coded_width;
5279  v->sprite_height = avctx->coded_height;
5280 
5281  avctx->coded_width = avctx->width = v->output_width;
5282  avctx->coded_height = avctx->height = v->output_height;
5283 
5284  // prevent 16.16 overflows
5285  if (v->sprite_width > 1 << 14 ||
5286  v->sprite_height > 1 << 14 ||
5287  v->output_width > 1 << 14 ||
5288  v->output_height > 1 << 14) return -1;
5289  }
5290  return 0;
5291 }
5292 
/* Decoder close callback: releases the sprite output frame and frees all
 * per-sequence tables allocated by the init/alloc-tables path.
 * NOTE(review): the signature line is missing from this extraction
 * (presumably "static av_cold int vc1_decode_end(AVCodecContext *avctx)"). */
5297 {
5298  VC1Context *v = avctx->priv_data;
5299  int i;
5300 
     /* the image (sprite) decoders own a composited output frame */
5301  if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5302  && v->sprite_output_frame.data[0])
5303  avctx->release_buffer(avctx, &v->sprite_output_frame);
5304  for (i = 0; i < 4; i++)
5305  av_freep(&v->sr_rows[i >> 1][i & 1]);
5306  av_freep(&v->hrd_rate);
5307  av_freep(&v->hrd_buffer);
5308  MPV_common_end(&v->s);
     /* NOTE(review): lines 5309-5311, 5314, 5316 and 5325 appear dropped by
      * extraction — presumably the av_freep calls for the remaining
      * bitplanes and blk_mv_type_base; verify against the repository. */
5312  av_freep(&v->fieldtx_plane);
5313  av_freep(&v->acpred_plane);
5315  av_freep(&v->mb_type_base);
5317  av_freep(&v->mv_f_base);
5318  av_freep(&v->mv_f_last_base);
5319  av_freep(&v->mv_f_next_base);
5320  av_freep(&v->block);
5321  av_freep(&v->cbp_base);
5322  av_freep(&v->ttblk_base);
5323  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5324  av_freep(&v->luma_mv_base);
5326  return 0;
5327 }
5328 
5329 
/* Main frame-decoding entry point: demarshals the packet (unescaping and
 * splitting slice/field chunks for advanced profile), parses the frame
 * header, decodes all slices (via hwaccel/VDPAU or software), and returns
 * the finished picture in *data.  Returns consumed bytes or -1 on error. */
5333 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5334  int *data_size, AVPacket *avpkt)
5335 {
5336  const uint8_t *buf = avpkt->data;
5337  int buf_size = avpkt->size, n_slices = 0, i;
5338  VC1Context *v = avctx->priv_data;
5339  MpegEncContext *s = &v->s;
5340  AVFrame *pict = data;
5341  uint8_t *buf2 = NULL;
5342  const uint8_t *buf_start = buf;
     /* NOTE(review): n_slices1 is only assigned when a field marker is seen;
      * in field mode without one it may be read uninitialized at the
      * "i == n_slices1 + 2" / "i <= n_slices1 + 1" checks below — verify. */
5343  int mb_height, n_slices1;
     /* per-slice bookkeeping: unescaped buffer, its reader, start MB row */
5344  struct {
5345  uint8_t *buf;
5346  GetBitContext gb;
5347  int mby_start;
5348  } *slices = NULL, *tmp;
5349 
5350  /* no supplementary picture */
5351  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5352  /* special case for last picture */
5353  if (s->low_delay == 0 && s->next_picture_ptr) {
5354  *pict = *(AVFrame*)s->next_picture_ptr;
5355  s->next_picture_ptr = NULL;
5356 
5357  *data_size = sizeof(AVFrame);
5358  }
5359 
5360  return 0;
5361  }
5362 
     /* NOTE(review): line 5363 — presumably the VDPAU capability guard
      * opening this block — appears dropped by extraction. */
5364  if (v->profile < PROFILE_ADVANCED)
5365  avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5366  else
5367  avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5368  }
5369 
5370  //for advanced profile we may need to parse and unescape data
5371  if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5372  int buf_size2 = 0;
5373  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5374 
5375  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5376  const uint8_t *start, *end, *next;
5377  int size;
5378 
     /* iterate over the start-code-delimited chunks of the packet */
5379  next = buf;
5380  for (start = buf, end = buf + buf_size; next < end; start = next) {
5381  next = find_next_marker(start + 4, end);
5382  size = next - start - 4;
5383  if (size <= 0) continue;
5384  switch (AV_RB32(start)) {
5385  case VC1_CODE_FRAME:
     /* NOTE(review): line 5387 (second half of this condition, presumably
      * the VDPAU capability test) appears dropped by extraction. */
5386  if (avctx->hwaccel ||
5388  buf_start = start;
5389  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5390  break;
5391  case VC1_CODE_FIELD: {
5392  int buf_size3;
     /* NOTE(review): direct av_realloc overwrite — on failure the previous
      * slices array leaks (contrast with the tmp pattern used below). */
5393  slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5394  if (!slices)
5395  goto err;
5396  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5397  if (!slices[n_slices].buf)
5398  goto err;
5399  buf_size3 = vc1_unescape_buffer(start + 4, size,
5400  slices[n_slices].buf);
5401  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5402  buf_size3 << 3);
5403  /* assuming that the field marker is at the exact middle,
5404  hope it's correct */
5405  slices[n_slices].mby_start = s->mb_height >> 1;
5406  n_slices1 = n_slices - 1; // index of the last slice of the first field
5407  n_slices++;
5408  break;
5409  }
5410  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5411  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5412  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5413  vc1_decode_entry_point(avctx, v, &s->gb);
5414  break;
5415  case VC1_CODE_SLICE: {
5416  int buf_size3;
     /* NOTE(review): same leak-on-failure av_realloc pattern as above. */
5417  slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5418  if (!slices)
5419  goto err;
5420  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5421  if (!slices[n_slices].buf)
5422  goto err;
5423  buf_size3 = vc1_unescape_buffer(start + 4, size,
5424  slices[n_slices].buf);
5425  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5426  buf_size3 << 3);
5427  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5428  n_slices++;
5429  break;
5430  }
5431  }
5432  }
5433  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5434  const uint8_t *divider;
5435  int buf_size3;
5436 
5437  divider = find_next_marker(buf, buf + buf_size);
5438  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5439  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5440  goto err;
5441  } else { // found field marker, unescape second field
5442  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5443  if (!tmp)
5444  goto err;
5445  slices = tmp;
5446  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5447  if (!slices[n_slices].buf)
5448  goto err;
5449  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5450  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5451  buf_size3 << 3);
5452  slices[n_slices].mby_start = s->mb_height >> 1;
5453  n_slices1 = n_slices - 1;
5454  n_slices++;
5455  }
5456  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5457  } else {
5458  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5459  }
5460  init_get_bits(&s->gb, buf2, buf_size2*8);
5461  } else
5462  init_get_bits(&s->gb, buf, buf_size*8);
5463 
5464  if (v->res_sprite) {
5465  v->new_sprite = !get_bits1(&s->gb);
5466  v->two_sprites = get_bits1(&s->gb);
5467  /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5468  we're using the sprite compositor. These are intentionally kept separate
5469  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5470  the vc1 one for WVP2 */
5471  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5472  if (v->new_sprite) {
5473  // switch AVCodecContext parameters to those of the sprites
5474  avctx->width = avctx->coded_width = v->sprite_width;
5475  avctx->height = avctx->coded_height = v->sprite_height;
5476  } else {
5477  goto image;
5478  }
5479  }
5480  }
5481 
     /* tear down and re-init on mid-stream dimension changes */
5482  if (s->context_initialized &&
5483  (s->width != avctx->coded_width ||
5484  s->height != avctx->coded_height)) {
5485  vc1_decode_end(avctx);
5486  }
5487 
5488  if (!s->context_initialized) {
5489  if (ff_msmpeg4_decode_init(avctx) < 0)
5490  return -1;
5491  if (vc1_decode_init_alloc_tables(v) < 0) {
5492  MPV_common_end(s);
5493  return -1;
5494  }
5495 
5496  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5497 
5498  if (v->profile == PROFILE_ADVANCED) {
5499  s->h_edge_pos = avctx->coded_width;
5500  s->v_edge_pos = avctx->coded_height;
5501  }
5502  }
5503 
5504  /* We need to set current_picture_ptr before reading the header,
5505  * otherwise we cannot store anything in there. */
5506  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5507  int i = ff_find_unused_picture(s, 0);
5508  if (i < 0)
5509  goto err;
5510  s->current_picture_ptr = &s->picture[i];
5511  }
5512 
5513  // do parse frame header
5514  v->pic_header_flag = 0;
5515  if (v->profile < PROFILE_ADVANCED) {
5516  if (vc1_parse_frame_header(v, &s->gb) == -1) {
5517  goto err;
5518  }
5519  } else {
5520  if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
5521  goto err;
5522  }
5523  }
5524 
     /* the sprite compositor can only work from intra pictures */
5525  if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5526  && s->pict_type != AV_PICTURE_TYPE_I) {
5527  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5528  goto err;
5529  }
5530 
5531  // process pulldown flags
     /* NOTE(review): line 5532 appears dropped by extraction here. */
5533  // Pulldown flags are only valid when 'broadcast' has been set.
5534  // So ticks_per_frame will be 2
5535  if (v->rff) {
5536  // repeat field
     /* NOTE(review): line 5537 — presumably the repeat_pict assignment for
      * the repeat-field case — appears dropped by extraction. */
5538  } else if (v->rptfrm) {
5539  // repeat frames
5540  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5541  }
5542 
5543  // for skipping the frame
     /* NOTE(review): lines 5544-5545 appear dropped by extraction here. */
5546 
5547  /* skip B-frames if we don't have reference frames */
5548  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
5549  goto err;
5550  }
5551  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5552  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5553  avctx->skip_frame >= AVDISCARD_ALL) {
5554  goto end;
5555  }
5556 
5557  if (s->next_p_frame_damaged) {
5558  if (s->pict_type == AV_PICTURE_TYPE_B)
5559  goto end;
5560  else
5561  s->next_p_frame_damaged = 0;
5562  }
5563 
5564  if (MPV_frame_start(s, avctx) < 0) {
5565  goto err;
5566  }
5567 
     /* NOTE(review): lines 5568-5569 and 5571-5572 — presumably the ER init
      * and the VDPAU capability guard for the call below — appear dropped
      * by extraction. */
5570 
5573  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5574  else if (avctx->hwaccel) {
5575  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5576  goto err;
5577  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5578  goto err;
5579  if (avctx->hwaccel->end_frame(avctx) < 0)
5580  goto err;
5581  } else {
5582  int header_ret = 0;
5583 
5584  ff_er_frame_start(s);
5585 
5586  v->bits = buf_size * 8;
     /* field pictures are decoded with doubled linesizes; also rotate the
      * field-MV flag buffers (last <- next <- current <- old last) */
5587  if (v->field_mode) {
5588  uint8_t *tmp[2];
5589  s->current_picture.f.linesize[0] <<= 1;
5590  s->current_picture.f.linesize[1] <<= 1;
5591  s->current_picture.f.linesize[2] <<= 1;
5592  s->linesize <<= 1;
5593  s->uvlinesize <<= 1;
5594  tmp[0] = v->mv_f_last[0];
5595  tmp[1] = v->mv_f_last[1];
5596  v->mv_f_last[0] = v->mv_f_next[0];
5597  v->mv_f_last[1] = v->mv_f_next[1];
5598  v->mv_f_next[0] = v->mv_f[0];
5599  v->mv_f_next[1] = v->mv_f[1];
5600  v->mv_f[0] = tmp[0];
5601  v->mv_f[1] = tmp[1];
5602  }
5603  mb_height = s->mb_height >> v->field_mode;
5604 
5605  if (!mb_height) {
5606  av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
5607  goto err;
5608  }
5609 
     /* decode the frame data plus each queued slice/field (n_slices + 1
      * passes); i indexes the NEXT slice boundary */
5610  for (i = 0; i <= n_slices; i++) {
5611  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5612  if (v->field_mode <= 0) {
5613  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
5614  "picture boundary (%d >= %d)\n", i,
5615  slices[i - 1].mby_start, mb_height);
5616  continue;
5617  }
5618  v->second_field = 1;
5619  v->blocks_off = s->mb_width * s->mb_height << 1;
5620  v->mb_off = s->mb_stride * s->mb_height >> 1;
5621  } else {
5622  v->second_field = 0;
5623  v->blocks_off = 0;
5624  v->mb_off = 0;
5625  }
5626  if (i) {
5627  v->pic_header_flag = 0;
     /* the second field starts with a full field header; ordinary slices
      * may carry an optional slice header flagged by one bit */
5628  if (v->field_mode && i == n_slices1 + 2) {
5629  if ((header_ret = vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
5630  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
5631  continue;
5632  }
5633  } else if (get_bits1(&s->gb)) {
5634  v->pic_header_flag = 1;
5635  if ((header_ret = vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
5636  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
5637  continue;
5638  }
5639  }
5640  }
5641  if (header_ret < 0)
5642  continue;
5643  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5644  if (!v->field_mode || v->second_field)
5645  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5646  else
5647  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5648  vc1_decode_blocks(v);
5649  if (i != n_slices)
5650  s->gb = slices[i].gb;
5651  }
     /* undo the field-mode linesize doubling done above */
5652  if (v->field_mode) {
5653  v->second_field = 0;
5654  if (s->pict_type == AV_PICTURE_TYPE_B) {
5655  memcpy(v->mv_f_base, v->mv_f_next_base,
5656  2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5657  }
5658  s->current_picture.f.linesize[0] >>= 1;
5659  s->current_picture.f.linesize[1] >>= 1;
5660  s->current_picture.f.linesize[2] >>= 1;
5661  s->linesize >>= 1;
5662  s->uvlinesize >>= 1;
5663  }
5664 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5665 // if (get_bits_count(&s->gb) > buf_size * 8)
5666 // return -1;
5667  ff_er_frame_end(s);
5668  }
5669 
5670  MPV_frame_end(s);
5671 
5672  if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5673 image:
     /* restore the container dimensions before handing out the frame */
5674  avctx->width = avctx->coded_width = v->output_width;
5675  avctx->height = avctx->coded_height = v->output_height;
5676  if (avctx->skip_frame >= AVDISCARD_NONREF)
5677  goto end;
5678 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5679  if (vc1_decode_sprites(v, &s->gb))
5680  goto err;
5681 #endif
5682  *pict = v->sprite_output_frame;
5683  *data_size = sizeof(AVFrame);
5684  } else {
     /* with B-frames the displayed picture lags one frame behind */
5685  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5686  *pict = *(AVFrame*)s->current_picture_ptr;
5687  } else if (s->last_picture_ptr != NULL) {
5688  *pict = *(AVFrame*)s->last_picture_ptr;
5689  }
5690  if (s->last_picture_ptr || s->low_delay) {
5691  *data_size = sizeof(AVFrame);
5692  ff_print_debug_info(s, pict);
5693  }
5694  }
5695 
5696 end:
5697  av_free(buf2);
5698  for (i = 0; i < n_slices; i++)
5699  av_free(slices[i].buf);
5700  av_free(slices);
5701  return buf_size;
5702 
5703 err:
5704  av_free(buf2);
5705  for (i = 0; i < n_slices; i++)
5706  av_free(slices[i].buf);
5707  av_free(slices);
5708  return -1;
5709 }
5710 
5711 
5712 static const AVProfile profiles[] = {
5713  { FF_PROFILE_VC1_SIMPLE, "Simple" },
5714  { FF_PROFILE_VC1_MAIN, "Main" },
5715  { FF_PROFILE_VC1_COMPLEX, "Complex" },
5716  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5717  { FF_PROFILE_UNKNOWN },
5718 };
5719 
/* Registration entry for the standalone VC-1 decoder.
 * NOTE(review): the struct's opening line (presumably
 * "AVCodec ff_vc1_decoder = {") and the .decode callback entry (line 5727)
 * appear dropped by this extraction — verify against the repository. */
5721  .name = "vc1",
5722  .type = AVMEDIA_TYPE_VIDEO,
5723  .id = CODEC_ID_VC1,
5724  .priv_data_size = sizeof(VC1Context),
5725  .init = vc1_decode_init,
5726  .close = vc1_decode_end,
5728  .flush = ff_mpeg_flush,
5729  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5730  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5731  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5732  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5733 };
5734 
5735 #if CONFIG_WMV3_DECODER
/* Registration entry for the WMV3 (Windows Media Video 9) decoder.
 * NOTE(review): the .decode callback entry (line 5743) appears dropped by
 * this extraction — verify against the repository. */
5736 AVCodec ff_wmv3_decoder = {
5737  .name = "wmv3",
5738  .type = AVMEDIA_TYPE_VIDEO,
5739  .id = CODEC_ID_WMV3,
5740  .priv_data_size = sizeof(VC1Context),
5741  .init = vc1_decode_init,
5742  .close = vc1_decode_end,
5744  .flush = ff_mpeg_flush,
5745  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5746  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5747  .pix_fmts = ff_hwaccel_pixfmt_list_420,
5748  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5749 };
5750 #endif
5751 
5752 #if CONFIG_WMV3_VDPAU_DECODER
/* Registration entry for the VDPAU-accelerated WMV3 decoder.
 * NOTE(review): lines 5760-5761 (presumably the .decode and .capabilities
 * entries) appear dropped by this extraction — verify against the repo. */
5753 AVCodec ff_wmv3_vdpau_decoder = {
5754  .name = "wmv3_vdpau",
5755  .type = AVMEDIA_TYPE_VIDEO,
5756  .id = CODEC_ID_WMV3,
5757  .priv_data_size = sizeof(VC1Context),
5758  .init = vc1_decode_init,
5759  .close = vc1_decode_end,
5762  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5763  .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5764  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5765 };
5766 #endif
5767 
5768 #if CONFIG_VC1_VDPAU_DECODER
/* Registration entry for the VDPAU-accelerated VC-1 decoder.
 * NOTE(review): lines 5776-5777 (presumably the .decode and .capabilities
 * entries) appear dropped by this extraction — verify against the repo. */
5769 AVCodec ff_vc1_vdpau_decoder = {
5770  .name = "vc1_vdpau",
5771  .type = AVMEDIA_TYPE_VIDEO,
5772  .id = CODEC_ID_VC1,
5773  .priv_data_size = sizeof(VC1Context),
5774  .init = vc1_decode_init,
5775  .close = vc1_decode_end,
5778  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5779  .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5780  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5781 };
5782 #endif
5783 
5784 #if CONFIG_WMV3IMAGE_DECODER
/* Registration entry for the WMV3 Image (sprite) decoder; uses the sprite
 * compositor and the dedicated flush callback to clear stale sprites.
 * NOTE(review): the .decode callback entry (line 5792) appears dropped by
 * this extraction — verify against the repository. */
5785 AVCodec ff_wmv3image_decoder = {
5786  .name = "wmv3image",
5787  .type = AVMEDIA_TYPE_VIDEO,
5788  .id = CODEC_ID_WMV3IMAGE,
5789  .priv_data_size = sizeof(VC1Context),
5790  .init = vc1_decode_init,
5791  .close = vc1_decode_end,
5793  .capabilities = CODEC_CAP_DR1,
5794  .flush = vc1_sprite_flush,
5795  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5796  .pix_fmts = ff_pixfmt_list_420
5797 };
5798 #endif
5799 
5800 #if CONFIG_VC1IMAGE_DECODER
/* Registration entry for the VC-1 Image (WVP2 sprite) decoder; uses the
 * sprite compositor and the dedicated flush callback.
 * NOTE(review): the .decode callback entry (line 5808) appears dropped by
 * this extraction — verify against the repository. */
5801 AVCodec ff_vc1image_decoder = {
5802  .name = "vc1image",
5803  .type = AVMEDIA_TYPE_VIDEO,
5804  .id = CODEC_ID_VC1IMAGE,
5805  .priv_data_size = sizeof(VC1Context),
5806  .init = vc1_decode_init,
5807  .close = vc1_decode_end,
5809  .capabilities = CODEC_CAP_DR1,
5810  .flush = vc1_sprite_flush,
5811  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5812  .pix_fmts = ff_pixfmt_list_420
5813 };
5814 #endif