mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43 
44 //#undef NDEBUG
45 //#include <assert.h>
46 
48  DCTELEM *block, int n, int qscale);
50  DCTELEM *block, int n, int qscale);
52  DCTELEM *block, int n, int qscale);
54  DCTELEM *block, int n, int qscale);
56  DCTELEM *block, int n, int qscale);
58  DCTELEM *block, int n, int qscale);
60  DCTELEM *block, int n, int qscale);
61 
62 
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
65 
66 //#define DEBUG
67 
68 
/* Default luma->chroma qscale mapping: the identity (chroma quantizer equals
 * the luma quantizer).  Codecs that need a non-linear chroma quantizer
 * install their own table instead of this one. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
    //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
        0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
74 
/* DC coefficient scale table for MPEG-1 (and codecs reusing its DC scaling):
 * the DC scaler is a constant 8 for every qscale value, i.e. independent of
 * the quantizer.  Indexed by qscale (0..127). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
        8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
};
86 
/* MPEG-2 DC scale table: constant scaler of 4 for every qscale.
 * NOTE(review): presumably selected via ff_mpeg2_dc_scale_table[] by the
 * stream's intra_dc_precision field (scaler 4 <=> 9-bit DC precision) —
 * confirm against the table of pointers below. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
        4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
};
98 
/* MPEG-2 DC scale table: constant scaler of 2 for every qscale.
 * NOTE(review): presumably the intra_dc_precision == 2 (10-bit) entry of
 * ff_mpeg2_dc_scale_table[] — confirm against the table of pointers below. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
        2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
};
110 
/* MPEG-2 DC scale table: constant scaler of 1 for every qscale.
 * NOTE(review): presumably the intra_dc_precision == 3 (11-bit) entry of
 * ff_mpeg2_dc_scale_table[] — confirm against the table of pointers below. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
};
122 
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
128 };
129 
133 };
134 
141 };
142 
/**
 * Scan [p, end) for an MPEG start-code prefix (bytes 0x00 0x00 0x01),
 * carrying match state across calls so a prefix that straddles two
 * buffers is still found.
 *
 * @param p     first byte to scan
 * @param end   one past the last byte of the buffer
 * @param state in/out: the last four bytes processed, big-endian packed;
 *              initialize to a value with no 0x000001 inside (e.g. ~0)
 *              before the first call
 * @return pointer just past the byte following the 0x000001 prefix when a
 *         start code was found (so *state then holds 0x000001xx),
 *         otherwise end
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to 3 bytes through *state first: this catches a start code
     * whose 00 00 01 prefix began in the previous buffer. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast scan: decide from the last three bytes how far ahead the next
     * possible 00 00 01 can be and skip accordingly. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3; /* byte > 1 can't be in a prefix: skip 3 */
        else if (p[-2]          ) p += 2; /* middle byte nonzero: skip 2 */
        else if (p[-3]|(p[-1]-1)) p++;    /* not exactly 00 00 01 yet: advance 1 */
        else {
            /* p[-3..-1] == 00 00 01: step past it and stop */
            p++;
            break;
        }
    }

    /* Reload *state from the last 4 bytes before the return position so
     * the next call resumes with consistent history. */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
175 
176 /* init common dct for both encoder and decoder */
178 {
179  dsputil_init(&s->dsp, s->avctx);
180 
186  if (s->flags & CODEC_FLAG_BITEXACT)
189 
190 #if HAVE_MMX
192 #elif ARCH_ALPHA
194 #elif CONFIG_MLIB
196 #elif HAVE_MMI
198 #elif ARCH_ARM
200 #elif HAVE_ALTIVEC
202 #elif ARCH_BFIN
204 #endif
205 
206  /* load & permutate scantables
207  * note: only wmv uses different ones
208  */
209  if (s->alternate_scan) {
212  } else {
215  }
218 
219  return 0;
220 }
221 
223 {
224  *dst = *src;
225  dst->f.type = FF_BUFFER_TYPE_COPY;
226 }
227 
232 {
233  /* Windows Media Image codecs allocate internal buffers with different
234  * dimensions; ignore user defined callbacks for these
235  */
238  else
241 }
242 
247 {
248  int r;
249 
250  if (s->avctx->hwaccel) {
251  assert(!pic->f.hwaccel_picture_private);
252  if (s->avctx->hwaccel->priv_data_size) {
254  if (!pic->f.hwaccel_picture_private) {
255  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
256  return -1;
257  }
258  }
259  }
260 
262  r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
263  else
264  r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
265 
266  if (r < 0 || !pic->f.type || !pic->f.data[0]) {
267  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
268  r, pic->f.type, pic->f.data[0]);
270  return -1;
271  }
272 
273  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
274  s->uvlinesize != pic->f.linesize[1])) {
276  "get_buffer() failed (stride changed)\n");
277  free_frame_buffer(s, pic);
278  return -1;
279  }
280 
281  if (pic->f.linesize[1] != pic->f.linesize[2]) {
283  "get_buffer() failed (uv stride mismatch)\n");
284  free_frame_buffer(s, pic);
285  return -1;
286  }
287 
288  return 0;
289 }
290 
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 {
297  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298 
299  // the + 1 is needed so memset(,,stride*height) does not sig11
300 
301  const int mb_array_size = s->mb_stride * s->mb_height;
302  const int b8_array_size = s->b8_stride * s->mb_height * 2;
303  const int b4_array_size = s->b4_stride * s->mb_height * 4;
304  int i;
305  int r = -1;
306 
307  if (shared) {
308  assert(pic->f.data[0]);
309  assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
311  } else {
312  assert(!pic->f.data[0]);
313 
314  if (alloc_frame_buffer(s, pic) < 0)
315  return -1;
316 
317  s->linesize = pic->f.linesize[0];
318  s->uvlinesize = pic->f.linesize[1];
319  }
320 
321  if (pic->f.qscale_table == NULL) {
322  if (s->encoding) {
323  FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324  mb_array_size * sizeof(int16_t), fail)
326  mb_array_size * sizeof(int16_t), fail)
328  mb_array_size * sizeof(int8_t ), fail)
329  }
330 
332  mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
334  (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335  fail)
337  (big_mb_num + s->mb_stride) * sizeof(uint32_t),
338  fail)
339  pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340  pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
341  if (s->out_format == FMT_H264) {
342  for (i = 0; i < 2; i++) {
344  2 * (b4_array_size + 4) * sizeof(int16_t),
345  fail)
346  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348  4 * mb_array_size * sizeof(uint8_t), fail)
349  }
350  pic->f.motion_subsample_log2 = 2;
351  } else if (s->out_format == FMT_H263 || s->encoding ||
352  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353  for (i = 0; i < 2; i++) {
355  2 * (b8_array_size + 4) * sizeof(int16_t),
356  fail)
357  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359  4 * mb_array_size * sizeof(uint8_t), fail)
360  }
361  pic->f.motion_subsample_log2 = 3;
362  }
363  if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
365  64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366  }
367  pic->f.qstride = s->mb_stride;
369  1 * sizeof(AVPanScan), fail)
370  }
371 
372  pic->owner2 = s;
373 
374  return 0;
375 fail: // for the FF_ALLOCZ_OR_GOTO macro
376  if (r >= 0)
377  free_frame_buffer(s, pic);
378  return -1;
379 }
380 
384 static void free_picture(MpegEncContext *s, Picture *pic)
385 {
386  int i;
387 
388  if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
389  free_frame_buffer(s, pic);
390  }
391 
392  av_freep(&pic->mb_var);
393  av_freep(&pic->mc_mb_var);
394  av_freep(&pic->mb_mean);
395  av_freep(&pic->f.mbskip_table);
397  av_freep(&pic->mb_type_base);
398  av_freep(&pic->f.dct_coeff);
399  av_freep(&pic->f.pan_scan);
400  pic->f.mb_type = NULL;
401  for (i = 0; i < 2; i++) {
402  av_freep(&pic->motion_val_base[i]);
403  av_freep(&pic->f.ref_index[i]);
404  }
405 
406  if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
407  for (i = 0; i < 4; i++) {
408  pic->f.base[i] =
409  pic->f.data[i] = NULL;
410  }
411  pic->f.type = 0;
412  }
413 }
414 
416 {
417  int y_size = s->b8_stride * (2 * s->mb_height + 1);
418  int c_size = s->mb_stride * (s->mb_height + 1);
419  int yc_size = y_size + 2 * c_size;
420  int i;
421 
422  // edge emu needs blocksize + filter length - 1
423  // (= 17x17 for halfpel / 21x21 for h264)
425  (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
426 
427  // FIXME should be linesize instead of s->width * 2
428  // but that is not known before get_buffer()
430  (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
431  s->me.temp = s->me.scratchpad;
432  s->rd_scratchpad = s->me.scratchpad;
433  s->b_scratchpad = s->me.scratchpad;
434  s->obmc_scratchpad = s->me.scratchpad + 16;
435  if (s->encoding) {
436  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
437  ME_MAP_SIZE * sizeof(uint32_t), fail)
439  ME_MAP_SIZE * sizeof(uint32_t), fail)
440  if (s->avctx->noise_reduction) {
442  2 * 64 * sizeof(int), fail)
443  }
444  }
445  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
446  s->block = s->blocks[0];
447 
448  for (i = 0; i < 12; i++) {
449  s->pblocks[i] = &s->block[i];
450  }
451 
452  if (s->out_format == FMT_H263) {
453  /* ac values */
455  yc_size * sizeof(int16_t) * 16, fail);
456  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
457  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
458  s->ac_val[2] = s->ac_val[1] + c_size;
459  }
460 
461  return 0;
462 fail:
463  return -1; // free() through MPV_common_end()
464 }
465 
467 {
468  if (s == NULL)
469  return;
470 
472  av_freep(&s->me.scratchpad);
473  s->me.temp =
474  s->rd_scratchpad =
475  s->b_scratchpad =
476  s->obmc_scratchpad = NULL;
477 
478  av_freep(&s->dct_error_sum);
479  av_freep(&s->me.map);
480  av_freep(&s->me.score_map);
481  av_freep(&s->blocks);
482  av_freep(&s->ac_val_base);
483  s->block = NULL;
484 }
485 
487 {
488 #define COPY(a) bak->a = src->a
489  COPY(edge_emu_buffer);
490  COPY(me.scratchpad);
491  COPY(me.temp);
492  COPY(rd_scratchpad);
493  COPY(b_scratchpad);
494  COPY(obmc_scratchpad);
495  COPY(me.map);
496  COPY(me.score_map);
497  COPY(blocks);
498  COPY(block);
499  COPY(start_mb_y);
500  COPY(end_mb_y);
501  COPY(me.map_generation);
502  COPY(pb);
503  COPY(dct_error_sum);
504  COPY(dct_count[0]);
505  COPY(dct_count[1]);
506  COPY(ac_val_base);
507  COPY(ac_val[0]);
508  COPY(ac_val[1]);
509  COPY(ac_val[2]);
510 #undef COPY
511 }
512 
514 {
515  MpegEncContext bak;
516  int i;
517  // FIXME copy only needed parts
518  // START_TIMER
519  backup_duplicate_context(&bak, dst);
520  memcpy(dst, src, sizeof(MpegEncContext));
521  backup_duplicate_context(dst, &bak);
522  for (i = 0; i < 12; i++) {
523  dst->pblocks[i] = &dst->block[i];
524  }
525  // STOP_TIMER("update_duplicate_context")
526  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
527 }
528 
530  const AVCodecContext *src)
531 {
532  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
533 
534  if (dst == src || !s1->context_initialized)
535  return 0;
536 
537  // FIXME can parameters change on I-frames?
538  // in that case dst may need a reinit
539  if (!s->context_initialized) {
540  memcpy(s, s1, sizeof(MpegEncContext));
541 
542  s->avctx = dst;
545  s->bitstream_buffer = NULL;
547 
548  MPV_common_init(s);
549  }
550 
551  s->avctx->coded_height = s1->avctx->coded_height;
552  s->avctx->coded_width = s1->avctx->coded_width;
553  s->avctx->width = s1->avctx->width;
554  s->avctx->height = s1->avctx->height;
555 
556  s->coded_picture_number = s1->coded_picture_number;
557  s->picture_number = s1->picture_number;
558  s->input_picture_number = s1->input_picture_number;
559 
560  memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
561  memcpy(&s->last_picture, &s1->last_picture,
562  (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
563 
564  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
565  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
566  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
567 
568  // Error/bug resilience
569  s->next_p_frame_damaged = s1->next_p_frame_damaged;
570  s->workaround_bugs = s1->workaround_bugs;
571 
572  // MPEG4 timing info
573  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
574  (char *) &s1->shape - (char *) &s1->time_increment_bits);
575 
576  // B-frame info
577  s->max_b_frames = s1->max_b_frames;
578  s->low_delay = s1->low_delay;
579  s->dropable = s1->dropable;
580 
581  // DivX handling (doesn't work)
582  s->divx_packed = s1->divx_packed;
583 
584  if (s1->bitstream_buffer) {
585  if (s1->bitstream_buffer_size +
589  s1->allocated_bitstream_buffer_size);
590  s->bitstream_buffer_size = s1->bitstream_buffer_size;
591  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
592  s1->bitstream_buffer_size);
593  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
595  }
596 
597  // MPEG2/interlacing info
598  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
599  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
600 
601  if (!s1->first_field) {
602  s->last_pict_type = s1->pict_type;
603  if (s1->current_picture_ptr)
604  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
605 
606  if (s1->pict_type != AV_PICTURE_TYPE_B) {
607  s->last_non_b_pict_type = s1->pict_type;
608  }
609  }
610 
611  return 0;
612 }
613 
621 {
622  s->y_dc_scale_table =
625  s->progressive_frame = 1;
626  s->progressive_sequence = 1;
628 
629  s->coded_picture_number = 0;
630  s->picture_number = 0;
631  s->input_picture_number = 0;
632 
633  s->picture_in_gop_number = 0;
634 
635  s->f_code = 1;
636  s->b_code = 1;
637 
638  s->picture_range_start = 0;
640 
641  s->slice_context_count = 1;
642 }
643 
650 {
652 }
653 
659 {
660  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
661  int nb_slices = (HAVE_THREADS &&
663  s->avctx->thread_count : 1;
664 
665  if (s->encoding && s->avctx->slices)
666  nb_slices = s->avctx->slices;
667 
669  s->mb_height = (s->height + 31) / 32 * 2;
670  else if (s->codec_id != CODEC_ID_H264)
671  s->mb_height = (s->height + 15) / 16;
672 
673  if (s->avctx->pix_fmt == PIX_FMT_NONE) {
675  "decoding to PIX_FMT_NONE is not supported.\n");
676  return -1;
677  }
678 
679  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
680  int max_slices;
681  if (s->mb_height)
682  max_slices = FFMIN(MAX_THREADS, s->mb_height);
683  else
684  max_slices = MAX_THREADS;
685  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
686  " reducing to %d\n", nb_slices, max_slices);
687  nb_slices = max_slices;
688  }
689 
690  if ((s->width || s->height) &&
691  av_image_check_size(s->width, s->height, 0, s->avctx))
692  return -1;
693 
695 
696  s->flags = s->avctx->flags;
697  s->flags2 = s->avctx->flags2;
698 
699  /* set chroma shifts */
701  &s->chroma_y_shift);
702 
703  /* convert fourcc to upper case */
705 
707 
708  if (s->width && s->height) {
709  s->mb_width = (s->width + 15) / 16;
710  s->mb_stride = s->mb_width + 1;
711  s->b8_stride = s->mb_width * 2 + 1;
712  s->b4_stride = s->mb_width * 4 + 1;
713  mb_array_size = s->mb_height * s->mb_stride;
714  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
715 
716  /* set default edge pos, will be overriden
717  * in decode_header if needed */
718  s->h_edge_pos = s->mb_width * 16;
719  s->v_edge_pos = s->mb_height * 16;
720 
721  s->mb_num = s->mb_width * s->mb_height;
722 
723  s->block_wrap[0] =
724  s->block_wrap[1] =
725  s->block_wrap[2] =
726  s->block_wrap[3] = s->b8_stride;
727  s->block_wrap[4] =
728  s->block_wrap[5] = s->mb_stride;
729 
730  y_size = s->b8_stride * (2 * s->mb_height + 1);
731  c_size = s->mb_stride * (s->mb_height + 1);
732  yc_size = y_size + 2 * c_size;
733 
735 
736  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
737  fail); // error ressilience code looks cleaner with this
738  for (y = 0; y < s->mb_height; y++)
739  for (x = 0; x < s->mb_width; x++)
740  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
741 
742  s->mb_index2xy[s->mb_height * s->mb_width] =
743  (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
744 
745  if (s->encoding) {
746  /* Allocate MV tables */
748  mv_table_size * 2 * sizeof(int16_t), fail);
750  mv_table_size * 2 * sizeof(int16_t), fail);
752  mv_table_size * 2 * sizeof(int16_t), fail);
754  mv_table_size * 2 * sizeof(int16_t), fail);
756  mv_table_size * 2 * sizeof(int16_t), fail);
758  mv_table_size * 2 * sizeof(int16_t), fail);
759  s->p_mv_table = s->p_mv_table_base +
760  s->mb_stride + 1;
762  s->mb_stride + 1;
764  s->mb_stride + 1;
766  s->mb_stride + 1;
768  s->mb_stride + 1;
770  s->mb_stride + 1;
771 
772  if (s->msmpeg4_version) {
774  2 * 2 * (MAX_LEVEL + 1) *
775  (MAX_RUN + 1) * 2 * sizeof(int), fail);
776  }
777  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
778 
779  /* Allocate MB type table */
780  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
781  sizeof(uint16_t), fail); // needed for encoding
782 
783  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
784  sizeof(int), fail);
785 
787  64 * 32 * sizeof(int), fail);
789  64 * 32 * sizeof(int), fail);
791  64 * 32 * 2 * sizeof(uint16_t), fail);
793  64 * 32 * 2 * sizeof(uint16_t), fail);
795  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
798 
799  if (s->avctx->noise_reduction) {
801  2 * 64 * sizeof(uint16_t), fail);
802  }
803  }
804  }
805 
808  s->picture_count * sizeof(Picture), fail);
809  for (i = 0; i < s->picture_count; i++) {
811  }
812 
813  if (s->width && s->height) {
815  mb_array_size * sizeof(uint8_t), fail);
816 
817  if (s->codec_id == CODEC_ID_MPEG4 ||
819  /* interlaced direct mode decoding tables */
820  for (i = 0; i < 2; i++) {
821  int j, k;
822  for (j = 0; j < 2; j++) {
823  for (k = 0; k < 2; k++) {
825  s->b_field_mv_table_base[i][j][k],
826  mv_table_size * 2 * sizeof(int16_t),
827  fail);
828  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
829  s->mb_stride + 1;
830  }
832  mb_array_size * 2 * sizeof(uint8_t),
833  fail);
835  mv_table_size * 2 * sizeof(int16_t),
836  fail);
837  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
838  + s->mb_stride + 1;
839  }
841  mb_array_size * 2 * sizeof(uint8_t),
842  fail);
843  }
844  }
845  if (s->out_format == FMT_H263) {
846  /* cbp values */
847  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
848  s->coded_block = s->coded_block_base + s->b8_stride + 1;
849 
850  /* cbp, ac_pred, pred_dir */
852  mb_array_size * sizeof(uint8_t), fail);
854  mb_array_size * sizeof(uint8_t), fail);
855  }
856 
857  if (s->h263_pred || s->h263_plus || !s->encoding) {
858  /* dc values */
859  // MN: we need these for error resilience of intra-frames
861  yc_size * sizeof(int16_t), fail);
862  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
863  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
864  s->dc_val[2] = s->dc_val[1] + c_size;
865  for (i = 0; i < yc_size; i++)
866  s->dc_val_base[i] = 1024;
867  }
868 
869  /* which mb is a intra block */
870  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
871  memset(s->mbintra_table, 1, mb_array_size);
872 
873  /* init macroblock skip table */
874  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
875  // Note the + 1 is for a quicker mpeg4 slice_end detection
876 
877  s->parse_context.state = -1;
879  s->avctx->debug_mv) {
880  s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
881  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
882  s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
883  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
884  s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
885  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
886  }
887  }
888 
889  s->context_initialized = 1;
890  s->thread_context[0] = s;
891 
892  if (s->width && s->height) {
893  if (nb_slices > 1) {
894  for (i = 1; i < nb_slices; i++) {
895  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
896  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
897  }
898 
899  for (i = 0; i < nb_slices; i++) {
900  if (init_duplicate_context(s->thread_context[i], s) < 0)
901  goto fail;
902  s->thread_context[i]->start_mb_y =
903  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
904  s->thread_context[i]->end_mb_y =
905  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
906  }
907  } else {
908  if (init_duplicate_context(s, s) < 0)
909  goto fail;
910  s->start_mb_y = 0;
911  s->end_mb_y = s->mb_height;
912  }
913  s->slice_context_count = nb_slices;
914  }
915 
916  return 0;
917  fail:
918  MPV_common_end(s);
919  return -1;
920 }
921 
922 /* init common structure for both encoder and decoder */
924 {
925  int i, j, k;
926 
927  if (s->slice_context_count > 1) {
928  for (i = 0; i < s->slice_context_count; i++) {
930  }
931  for (i = 1; i < s->slice_context_count; i++) {
932  av_freep(&s->thread_context[i]);
933  }
934  s->slice_context_count = 1;
935  } else free_duplicate_context(s);
936 
938  s->parse_context.buffer_size = 0;
939 
940  av_freep(&s->mb_type);
947  s->p_mv_table = NULL;
948  s->b_forw_mv_table = NULL;
949  s->b_back_mv_table = NULL;
952  s->b_direct_mv_table = NULL;
953  for (i = 0; i < 2; i++) {
954  for (j = 0; j < 2; j++) {
955  for (k = 0; k < 2; k++) {
956  av_freep(&s->b_field_mv_table_base[i][j][k]);
957  s->b_field_mv_table[i][j][k] = NULL;
958  }
959  av_freep(&s->b_field_select_table[i][j]);
960  av_freep(&s->p_field_mv_table_base[i][j]);
961  s->p_field_mv_table[i][j] = NULL;
962  }
964  }
965 
966  av_freep(&s->dc_val_base);
968  av_freep(&s->mbintra_table);
969  av_freep(&s->cbp_table);
971 
972  av_freep(&s->mbskip_table);
975 
976  av_freep(&s->avctx->stats_out);
977  av_freep(&s->ac_stats);
979  av_freep(&s->mb_index2xy);
980  av_freep(&s->lambda_table);
985  av_freep(&s->input_picture);
987  av_freep(&s->dct_offset);
988 
989  if (s->picture && !s->avctx->internal->is_copy) {
990  for (i = 0; i < s->picture_count; i++) {
991  free_picture(s, &s->picture[i]);
992  }
993  }
994  av_freep(&s->picture);
995  s->context_initialized = 0;
996  s->last_picture_ptr =
997  s->next_picture_ptr =
999  s->linesize = s->uvlinesize = 0;
1000 
1001  for (i = 0; i < 3; i++)
1003 
1006 }
1007 
1009  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1010 {
1011  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1012  uint8_t index_run[MAX_RUN + 1];
1013  int last, run, level, start, end, i;
1014 
1015  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1016  if (static_store && rl->max_level[0])
1017  return;
1018 
1019  /* compute max_level[], max_run[] and index_run[] */
1020  for (last = 0; last < 2; last++) {
1021  if (last == 0) {
1022  start = 0;
1023  end = rl->last;
1024  } else {
1025  start = rl->last;
1026  end = rl->n;
1027  }
1028 
1029  memset(max_level, 0, MAX_RUN + 1);
1030  memset(max_run, 0, MAX_LEVEL + 1);
1031  memset(index_run, rl->n, MAX_RUN + 1);
1032  for (i = start; i < end; i++) {
1033  run = rl->table_run[i];
1034  level = rl->table_level[i];
1035  if (index_run[run] == rl->n)
1036  index_run[run] = i;
1037  if (level > max_level[run])
1038  max_level[run] = level;
1039  if (run > max_run[level])
1040  max_run[level] = run;
1041  }
1042  if (static_store)
1043  rl->max_level[last] = static_store[last];
1044  else
1045  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1046  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1047  if (static_store)
1048  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1049  else
1050  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1051  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1052  if (static_store)
1053  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1054  else
1055  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1056  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1057  }
1058 }
1059 
1061 {
1062  int i, q;
1063 
1064  for (q = 0; q < 32; q++) {
1065  int qmul = q * 2;
1066  int qadd = (q - 1) | 1;
1067 
1068  if (q == 0) {
1069  qmul = 1;
1070  qadd = 0;
1071  }
1072  for (i = 0; i < rl->vlc.table_size; i++) {
1073  int code = rl->vlc.table[i][0];
1074  int len = rl->vlc.table[i][1];
1075  int level, run;
1076 
1077  if (len == 0) { // illegal code
1078  run = 66;
1079  level = MAX_LEVEL;
1080  } else if (len < 0) { // more bits needed
1081  run = 0;
1082  level = code;
1083  } else {
1084  if (code == rl->n) { // esc
1085  run = 66;
1086  level = 0;
1087  } else {
1088  run = rl->table_run[code] + 1;
1089  level = rl->table_level[code] * qmul + qadd;
1090  if (code >= rl->last) run += 192;
1091  }
1092  }
1093  rl->rl_vlc[q][i].len = len;
1094  rl->rl_vlc[q][i].level = level;
1095  rl->rl_vlc[q][i].run = run;
1096  }
1097  }
1098 }
1099 
1100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1101 {
1102  int i;
1103 
1104  /* release non reference frames */
1105  for (i = 0; i < s->picture_count; i++) {
1106  if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1107  (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1108  (remove_current || &s->picture[i] != s->current_picture_ptr)
1109  /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1110  free_frame_buffer(s, &s->picture[i]);
1111  }
1112  }
1113 }
1114 
1116 {
1117  int i;
1118 
1119  if (shared) {
1120  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1121  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1122  return i;
1123  }
1124  } else {
1125  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1126  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1127  return i; // FIXME
1128  }
1129  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1130  if (s->picture[i].f.data[0] == NULL)
1131  return i;
1132  }
1133  }
1134 
1135  return AVERROR_INVALIDDATA;
1136 }
1137 
1139 {
1140  int intra, i;
1141 
1142  for (intra = 0; intra < 2; intra++) {
1143  if (s->dct_count[intra] > (1 << 16)) {
1144  for (i = 0; i < 64; i++) {
1145  s->dct_error_sum[intra][i] >>= 1;
1146  }
1147  s->dct_count[intra] >>= 1;
1148  }
1149 
1150  for (i = 0; i < 64; i++) {
1151  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1152  s->dct_count[intra] +
1153  s->dct_error_sum[intra][i] / 2) /
1154  (s->dct_error_sum[intra][i] + 1);
1155  }
1156  }
1157 }
1158 
1164 {
1165  int i;
1166  Picture *pic;
1167  s->mb_skipped = 0;
1168 
1169  assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1170  s->codec_id == CODEC_ID_SVQ3);
1171 
1172  /* mark & release old frames */
1173  if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1174  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1176  s->last_picture_ptr->f.data[0]) {
1177  if (s->last_picture_ptr->owner2 == s)
1179  }
1180 
1181  /* release forgotten pictures */
1182  /* if (mpeg124/h263) */
1183  if (!s->encoding) {
1184  for (i = 0; i < s->picture_count; i++) {
1185  if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1186  &s->picture[i] != s->last_picture_ptr &&
1187  &s->picture[i] != s->next_picture_ptr &&
1188  s->picture[i].f.reference) {
1189  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1190  av_log(avctx, AV_LOG_ERROR,
1191  "releasing zombie picture\n");
1192  free_frame_buffer(s, &s->picture[i]);
1193  }
1194  }
1195  }
1196  }
1197 
1198  if (!s->encoding) {
1200 
1201  if (s->current_picture_ptr &&
1202  s->current_picture_ptr->f.data[0] == NULL) {
1203  // we already have a unused image
1204  // (maybe it was set before reading the header)
1205  pic = s->current_picture_ptr;
1206  } else {
1207  i = ff_find_unused_picture(s, 0);
1208  pic = &s->picture[i];
1209  }
1210 
1211  pic->f.reference = 0;
1212  if (!s->dropable) {
1213  if (s->codec_id == CODEC_ID_H264)
1214  pic->f.reference = s->picture_structure;
1215  else if (s->pict_type != AV_PICTURE_TYPE_B)
1216  pic->f.reference = 3;
1217  }
1218 
1220 
1221  if (ff_alloc_picture(s, pic, 0) < 0)
1222  return -1;
1223 
1224  s->current_picture_ptr = pic;
1225  // FIXME use only the vars from current_pic
1227  if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1228  s->codec_id == CODEC_ID_MPEG2VIDEO) {
1229  if (s->picture_structure != PICT_FRAME)
1232  }
1236  }
1237 
1239  // if (s->flags && CODEC_FLAG_QSCALE)
1240  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1242 
1244 
1245  if (s->pict_type != AV_PICTURE_TYPE_B) {
1247  if (!s->dropable)
1249  }
1250  /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1251  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1252  s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1253  s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1254  s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1255  s->pict_type, s->dropable); */
1256 
1257  if (s->codec_id != CODEC_ID_H264) {
1258  if ((s->last_picture_ptr == NULL ||
1259  s->last_picture_ptr->f.data[0] == NULL) &&
1260  (s->pict_type != AV_PICTURE_TYPE_I ||
1261  s->picture_structure != PICT_FRAME)) {
1262  if (s->pict_type != AV_PICTURE_TYPE_I)
1263  av_log(avctx, AV_LOG_ERROR,
1264  "warning: first frame is no keyframe\n");
1265  else if (s->picture_structure != PICT_FRAME)
1266  av_log(avctx, AV_LOG_INFO,
1267  "allocate dummy last picture for field based first keyframe\n");
1268 
1269  /* Allocate a dummy frame */
1270  i = ff_find_unused_picture(s, 0);
1271  s->last_picture_ptr = &s->picture[i];
1272 
1273  s->last_picture_ptr->f.reference = 3;
1275 
1276  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1277  return -1;
1279  INT_MAX, 0);
1281  INT_MAX, 1);
1282  }
1283  if ((s->next_picture_ptr == NULL ||
1284  s->next_picture_ptr->f.data[0] == NULL) &&
1285  s->pict_type == AV_PICTURE_TYPE_B) {
1286  /* Allocate a dummy frame */
1287  i = ff_find_unused_picture(s, 0);
1288  s->next_picture_ptr = &s->picture[i];
1289 
1290  s->next_picture_ptr->f.reference = 3;
1292 
1293  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1294  return -1;
1296  INT_MAX, 0);
1298  INT_MAX, 1);
1299  }
1300  }
1301 
1302  if (s->last_picture_ptr)
1304  if (s->next_picture_ptr)
1306 
1307  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1308  (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1309  if (s->next_picture_ptr)
1310  s->next_picture_ptr->owner2 = s;
1311  if (s->last_picture_ptr)
1312  s->last_picture_ptr->owner2 = s;
1313  }
1314 
1315  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1316  s->last_picture_ptr->f.data[0]));
1317 
1318  if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1319  int i;
1320  for (i = 0; i < 4; i++) {
1322  s->current_picture.f.data[i] +=
1323  s->current_picture.f.linesize[i];
1324  }
1325  s->current_picture.f.linesize[i] *= 2;
1326  s->last_picture.f.linesize[i] *= 2;
1327  s->next_picture.f.linesize[i] *= 2;
1328  }
1329  }
1330 
1331  s->err_recognition = avctx->err_recognition;
1332 
1333  /* set dequantizer, we can't do it during init as
1334  * it might change for mpeg4 and we can't do it in the header
1335  * decode as init is not called for mpeg4 there yet */
1336  if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1339  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1342  } else {
1345  }
1346 
1347  if (s->dct_error_sum) {
1348  assert(s->avctx->noise_reduction && s->encoding);
1350  }
1351 
1353  return ff_xvmc_field_start(s, avctx);
1354 
1355  return 0;
1356 }
1357 
1358 /* generic function for encode/decode called after a
1359  * frame has been coded/decoded. */
1361 {
1362  int i;
1363  /* redraw edges for the frame if decoding didn't complete */
1364  // just to make sure that all data is rendered.
1366  ff_xvmc_field_end(s);
1367  } else if ((s->error_count || s->encoding) &&
1368  !s->avctx->hwaccel &&
1370  s->unrestricted_mv &&
1372  !s->intra_only &&
1373  !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1377  s->h_edge_pos, s->v_edge_pos,
1379  EDGE_TOP | EDGE_BOTTOM);
1381  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1382  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1383  EDGE_TOP | EDGE_BOTTOM);
1385  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1386  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1387  EDGE_TOP | EDGE_BOTTOM);
1388  }
1389 
1390  emms_c();
1391 
1392  s->last_pict_type = s->pict_type;
1394  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1396  }
1397 #if 0
1398  /* copy back current_picture variables */
1399  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1400  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1401  s->picture[i] = s->current_picture;
1402  break;
1403  }
1404  }
1405  assert(i < MAX_PICTURE_COUNT);
1406 #endif
1407 
1408  if (s->encoding) {
1409  /* release non-reference frames */
1410  for (i = 0; i < s->picture_count; i++) {
1411  if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1412  /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1413  free_frame_buffer(s, &s->picture[i]);
1414  }
1415  }
1416  }
1417  // clear copies, to avoid confusion
1418 #if 0
1419  memset(&s->last_picture, 0, sizeof(Picture));
1420  memset(&s->next_picture, 0, sizeof(Picture));
1421  memset(&s->current_picture, 0, sizeof(Picture));
1422 #endif
1424 
1425  if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1427  }
1428 }
1429 
1437 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1438  int w, int h, int stride, int color)
1439 {
1440  int x, y, fr, f;
1441 
1442  sx = av_clip(sx, 0, w - 1);
1443  sy = av_clip(sy, 0, h - 1);
1444  ex = av_clip(ex, 0, w - 1);
1445  ey = av_clip(ey, 0, h - 1);
1446 
1447  buf[sy * stride + sx] += color;
1448 
1449  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1450  if (sx > ex) {
1451  FFSWAP(int, sx, ex);
1452  FFSWAP(int, sy, ey);
1453  }
1454  buf += sx + sy * stride;
1455  ex -= sx;
1456  f = ((ey - sy) << 16) / ex;
1457  for (x = 0; x = ex; x++) {
1458  y = (x * f) >> 16;
1459  fr = (x * f) & 0xFFFF;
1460  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1461  buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1462  }
1463  } else {
1464  if (sy > ey) {
1465  FFSWAP(int, sx, ex);
1466  FFSWAP(int, sy, ey);
1467  }
1468  buf += sx + sy * stride;
1469  ey -= sy;
1470  if (ey)
1471  f = ((ex - sx) << 16) / ey;
1472  else
1473  f = 0;
1474  for (y = 0; y = ey; y++) {
1475  x = (y * f) >> 16;
1476  fr = (y * f) & 0xFFFF;
1477  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1478  buf[y * stride + x + 1] += (color * fr ) >> 16;
1479  }
1480  }
1481 }
1482 
/**
 * Draw an arrow from (ex, ey) to (sx, sy); the head is rendered at (sx, sy).
 *
 * @param buf    destination plane
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color value added to the covered pixels
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int delta_x, delta_y;

    /* keep endpoints within a loose margin around the picture */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    delta_x = ex - sx;
    delta_y = ey - sy;

    /* only draw the two head strokes for arrows longer than 3 pixels */
    if (delta_x * delta_x + delta_y * delta_y > 3 * 3) {
        int head_x = delta_x + delta_y;        /* shaft rotated by 45 deg  */
        int head_y = delta_y - delta_x;        /* shaft rotated by -45 deg */
        int length = ff_sqrt((head_x * head_x + head_y * head_y) << 8);

        // FIXME subpixel accuracy
        head_x = ROUNDED_DIV(head_x * 3 << 4, length);
        head_y = ROUNDED_DIV(head_y * 3 << 4, length);

        draw_line(buf, sx, sy, sx + head_x, sy + head_y, w, h, stride, color);
        draw_line(buf, sx, sy, sx - head_y, sy + head_x, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1517 
1522 {
1523  if (s->avctx->hwaccel || !pict || !pict->mb_type)
1524  return;
1525 
1527  int x,y;
1528 
1529  av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1530  switch (pict->pict_type) {
1531  case AV_PICTURE_TYPE_I:
1532  av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1533  break;
1534  case AV_PICTURE_TYPE_P:
1535  av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1536  break;
1537  case AV_PICTURE_TYPE_B:
1538  av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1539  break;
1540  case AV_PICTURE_TYPE_S:
1541  av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1542  break;
1543  case AV_PICTURE_TYPE_SI:
1544  av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1545  break;
1546  case AV_PICTURE_TYPE_SP:
1547  av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1548  break;
1549  }
1550  for (y = 0; y < s->mb_height; y++) {
1551  for (x = 0; x < s->mb_width; x++) {
1552  if (s->avctx->debug & FF_DEBUG_SKIP) {
1553  int count = s->mbskip_table[x + y * s->mb_stride];
1554  if (count > 9)
1555  count = 9;
1556  av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1557  }
1558  if (s->avctx->debug & FF_DEBUG_QP) {
1559  av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1560  pict->qscale_table[x + y * s->mb_stride]);
1561  }
1562  if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1563  int mb_type = pict->mb_type[x + y * s->mb_stride];
1564  // Type & MV direction
1565  if (IS_PCM(mb_type))
1566  av_log(s->avctx, AV_LOG_DEBUG, "P");
1567  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1568  av_log(s->avctx, AV_LOG_DEBUG, "A");
1569  else if (IS_INTRA4x4(mb_type))
1570  av_log(s->avctx, AV_LOG_DEBUG, "i");
1571  else if (IS_INTRA16x16(mb_type))
1572  av_log(s->avctx, AV_LOG_DEBUG, "I");
1573  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1574  av_log(s->avctx, AV_LOG_DEBUG, "d");
1575  else if (IS_DIRECT(mb_type))
1576  av_log(s->avctx, AV_LOG_DEBUG, "D");
1577  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1578  av_log(s->avctx, AV_LOG_DEBUG, "g");
1579  else if (IS_GMC(mb_type))
1580  av_log(s->avctx, AV_LOG_DEBUG, "G");
1581  else if (IS_SKIP(mb_type))
1582  av_log(s->avctx, AV_LOG_DEBUG, "S");
1583  else if (!USES_LIST(mb_type, 1))
1584  av_log(s->avctx, AV_LOG_DEBUG, ">");
1585  else if (!USES_LIST(mb_type, 0))
1586  av_log(s->avctx, AV_LOG_DEBUG, "<");
1587  else {
1588  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1589  av_log(s->avctx, AV_LOG_DEBUG, "X");
1590  }
1591 
1592  // segmentation
1593  if (IS_8X8(mb_type))
1594  av_log(s->avctx, AV_LOG_DEBUG, "+");
1595  else if (IS_16X8(mb_type))
1596  av_log(s->avctx, AV_LOG_DEBUG, "-");
1597  else if (IS_8X16(mb_type))
1598  av_log(s->avctx, AV_LOG_DEBUG, "|");
1599  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1600  av_log(s->avctx, AV_LOG_DEBUG, " ");
1601  else
1602  av_log(s->avctx, AV_LOG_DEBUG, "?");
1603 
1604 
1605  if (IS_INTERLACED(mb_type))
1606  av_log(s->avctx, AV_LOG_DEBUG, "=");
1607  else
1608  av_log(s->avctx, AV_LOG_DEBUG, " ");
1609  }
1610  // av_log(s->avctx, AV_LOG_DEBUG, " ");
1611  }
1612  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1613  }
1614  }
1615 
1616  if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1617  (s->avctx->debug_mv)) {
1618  const int shift = 1 + s->quarter_sample;
1619  int mb_y;
1620  uint8_t *ptr;
1621  int i;
1622  int h_chroma_shift, v_chroma_shift, block_height;
1623  const int width = s->avctx->width;
1624  const int height = s->avctx->height;
1625  const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1626  const int mv_stride = (s->mb_width << mv_sample_log2) +
1627  (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1628  s->low_delay = 0; // needed to see the vectors without trashing the buffers
1629 
1631  &h_chroma_shift, &v_chroma_shift);
1632  for (i = 0; i < 3; i++) {
1633  memcpy(s->visualization_buffer[i], pict->data[i],
1634  (i == 0) ? pict->linesize[i] * height:
1635  pict->linesize[i] * height >> v_chroma_shift);
1636  pict->data[i] = s->visualization_buffer[i];
1637  }
1638  pict->type = FF_BUFFER_TYPE_COPY;
1639  ptr = pict->data[0];
1640  block_height = 16 >> v_chroma_shift;
1641 
1642  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1643  int mb_x;
1644  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1645  const int mb_index = mb_x + mb_y * s->mb_stride;
1646  if ((s->avctx->debug_mv) && pict->motion_val) {
1647  int type;
1648  for (type = 0; type < 3; type++) {
1649  int direction = 0;
1650  switch (type) {
1651  case 0:
1652  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1653  (pict->pict_type!= AV_PICTURE_TYPE_P))
1654  continue;
1655  direction = 0;
1656  break;
1657  case 1:
1658  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1659  (pict->pict_type!= AV_PICTURE_TYPE_B))
1660  continue;
1661  direction = 0;
1662  break;
1663  case 2:
1664  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1665  (pict->pict_type!= AV_PICTURE_TYPE_B))
1666  continue;
1667  direction = 1;
1668  break;
1669  }
1670  if (!USES_LIST(pict->mb_type[mb_index], direction))
1671  continue;
1672 
1673  if (IS_8X8(pict->mb_type[mb_index])) {
1674  int i;
1675  for (i = 0; i < 4; i++) {
1676  int sx = mb_x * 16 + 4 + 8 * (i & 1);
1677  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1678  int xy = (mb_x * 2 + (i & 1) +
1679  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1680  int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1681  int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1682  draw_arrow(ptr, sx, sy, mx, my, width,
1683  height, s->linesize, 100);
1684  }
1685  } else if (IS_16X8(pict->mb_type[mb_index])) {
1686  int i;
1687  for (i = 0; i < 2; i++) {
1688  int sx = mb_x * 16 + 8;
1689  int sy = mb_y * 16 + 4 + 8 * i;
1690  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1691  int mx = (pict->motion_val[direction][xy][0] >> shift);
1692  int my = (pict->motion_val[direction][xy][1] >> shift);
1693 
1694  if (IS_INTERLACED(pict->mb_type[mb_index]))
1695  my *= 2;
1696 
1697  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1698  height, s->linesize, 100);
1699  }
1700  } else if (IS_8X16(pict->mb_type[mb_index])) {
1701  int i;
1702  for (i = 0; i < 2; i++) {
1703  int sx = mb_x * 16 + 4 + 8 * i;
1704  int sy = mb_y * 16 + 8;
1705  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1706  int mx = pict->motion_val[direction][xy][0] >> shift;
1707  int my = pict->motion_val[direction][xy][1] >> shift;
1708 
1709  if (IS_INTERLACED(pict->mb_type[mb_index]))
1710  my *= 2;
1711 
1712  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1713  height, s->linesize, 100);
1714  }
1715  } else {
1716  int sx = mb_x * 16 + 8;
1717  int sy = mb_y * 16 + 8;
1718  int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1719  int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1720  int my = pict->motion_val[direction][xy][1] >> shift + sy;
1721  draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1722  }
1723  }
1724  }
1725  if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1726  uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1727  0x0101010101010101ULL;
1728  int y;
1729  for (y = 0; y < block_height; y++) {
1730  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1731  (block_height * mb_y + y) *
1732  pict->linesize[1]) = c;
1733  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1734  (block_height * mb_y + y) *
1735  pict->linesize[2]) = c;
1736  }
1737  }
1738  if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1739  pict->motion_val) {
1740  int mb_type = pict->mb_type[mb_index];
1741  uint64_t u,v;
1742  int y;
1743 #define COLOR(theta, r) \
1744  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1745  v = (int)(128 + r * sin(theta * 3.141592 / 180));
1746 
1747 
1748  u = v = 128;
1749  if (IS_PCM(mb_type)) {
1750  COLOR(120, 48)
1751  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1752  IS_INTRA16x16(mb_type)) {
1753  COLOR(30, 48)
1754  } else if (IS_INTRA4x4(mb_type)) {
1755  COLOR(90, 48)
1756  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1757  // COLOR(120, 48)
1758  } else if (IS_DIRECT(mb_type)) {
1759  COLOR(150, 48)
1760  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1761  COLOR(170, 48)
1762  } else if (IS_GMC(mb_type)) {
1763  COLOR(190, 48)
1764  } else if (IS_SKIP(mb_type)) {
1765  // COLOR(180, 48)
1766  } else if (!USES_LIST(mb_type, 1)) {
1767  COLOR(240, 48)
1768  } else if (!USES_LIST(mb_type, 0)) {
1769  COLOR(0, 48)
1770  } else {
1771  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1772  COLOR(300,48)
1773  }
1774 
1775  u *= 0x0101010101010101ULL;
1776  v *= 0x0101010101010101ULL;
1777  for (y = 0; y < block_height; y++) {
1778  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1779  (block_height * mb_y + y) * pict->linesize[1]) = u;
1780  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1781  (block_height * mb_y + y) * pict->linesize[2]) = v;
1782  }
1783 
1784  // segmentation
1785  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1786  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1787  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1788  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1789  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1790  }
1791  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1792  for (y = 0; y < 16; y++)
1793  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1794  pict->linesize[0]] ^= 0x80;
1795  }
1796  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1797  int dm = 1 << (mv_sample_log2 - 2);
1798  for (i = 0; i < 4; i++) {
1799  int sx = mb_x * 16 + 8 * (i & 1);
1800  int sy = mb_y * 16 + 8 * (i >> 1);
1801  int xy = (mb_x * 2 + (i & 1) +
1802  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1803  // FIXME bidir
1804  int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1805  if (mv[0] != mv[dm] ||
1806  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1807  for (y = 0; y < 8; y++)
1808  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1809  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1810  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1811  pict->linesize[0]) ^= 0x8080808080808080ULL;
1812  }
1813  }
1814 
1815  if (IS_INTERLACED(mb_type) &&
1816  s->codec_id == CODEC_ID_H264) {
1817  // hmm
1818  }
1819  }
1820  s->mbskip_table[mb_index] = 0;
1821  }
1822  }
1823  }
1824 }
1825 
1826 static inline int hpel_motion_lowres(MpegEncContext *s,
1827  uint8_t *dest, uint8_t *src,
1828  int field_based, int field_select,
1829  int src_x, int src_y,
1830  int width, int height, int stride,
1831  int h_edge_pos, int v_edge_pos,
1832  int w, int h, h264_chroma_mc_func *pix_op,
1833  int motion_x, int motion_y)
1834 {
1835  const int lowres = s->avctx->lowres;
1836  const int op_index = FFMIN(lowres, 2);
1837  const int s_mask = (2 << lowres) - 1;
1838  int emu = 0;
1839  int sx, sy;
1840 
1841  if (s->quarter_sample) {
1842  motion_x /= 2;
1843  motion_y /= 2;
1844  }
1845 
1846  sx = motion_x & s_mask;
1847  sy = motion_y & s_mask;
1848  src_x += motion_x >> lowres + 1;
1849  src_y += motion_y >> lowres + 1;
1850 
1851  src += src_y * stride + src_x;
1852 
1853  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1854  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1855  s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
1856  (h + 1) << field_based, src_x,
1857  src_y << field_based,
1858  h_edge_pos,
1859  v_edge_pos);
1860  src = s->edge_emu_buffer;
1861  emu = 1;
1862  }
1863 
1864  sx = (sx << 2) >> lowres;
1865  sy = (sy << 2) >> lowres;
1866  if (field_select)
1867  src += s->linesize;
1868  pix_op[op_index](dest, src, stride, h, sx, sy);
1869  return emu;
1870 }
1871 
1872 /* apply one mpeg motion vector to the three components */
1874  uint8_t *dest_y,
1875  uint8_t *dest_cb,
1876  uint8_t *dest_cr,
1877  int field_based,
1878  int bottom_field,
1879  int field_select,
1880  uint8_t **ref_picture,
1881  h264_chroma_mc_func *pix_op,
1882  int motion_x, int motion_y,
1883  int h, int mb_y)
1884 {
1885  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1886  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
1887  uvsx, uvsy;
1888  const int lowres = s->avctx->lowres;
1889  const int op_index = FFMIN(lowres, 2);
1890  const int block_s = 8>>lowres;
1891  const int s_mask = (2 << lowres) - 1;
1892  const int h_edge_pos = s->h_edge_pos >> lowres;
1893  const int v_edge_pos = s->v_edge_pos >> lowres;
1894  linesize = s->current_picture.f.linesize[0] << field_based;
1895  uvlinesize = s->current_picture.f.linesize[1] << field_based;
1896 
1897  // FIXME obviously not perfect but qpel will not work in lowres anyway
1898  if (s->quarter_sample) {
1899  motion_x /= 2;
1900  motion_y /= 2;
1901  }
1902 
1903  if (field_based) {
1904  motion_y += (bottom_field - field_select) * (1 << lowres - 1);
1905  }
1906 
1907  sx = motion_x & s_mask;
1908  sy = motion_y & s_mask;
1909  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1910  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1911 
1912  if (s->out_format == FMT_H263) {
1913  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1914  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1915  uvsrc_x = src_x >> 1;
1916  uvsrc_y = src_y >> 1;
1917  } else if (s->out_format == FMT_H261) {
1918  // even chroma mv's are full pel in H261
1919  mx = motion_x / 4;
1920  my = motion_y / 4;
1921  uvsx = (2 * mx) & s_mask;
1922  uvsy = (2 * my) & s_mask;
1923  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1924  uvsrc_y = mb_y * block_s + (my >> lowres);
1925  } else {
1926  mx = motion_x / 2;
1927  my = motion_y / 2;
1928  uvsx = mx & s_mask;
1929  uvsy = my & s_mask;
1930  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1931  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1932  }
1933 
1934  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1935  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1936  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1937 
1938  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
1939  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1940  s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
1941  s->linesize, 17, 17 + field_based,
1942  src_x, src_y << field_based, h_edge_pos,
1943  v_edge_pos);
1944  ptr_y = s->edge_emu_buffer;
1945  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1946  uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
1947  s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
1948  9 + field_based,
1949  uvsrc_x, uvsrc_y << field_based,
1950  h_edge_pos >> 1, v_edge_pos >> 1);
1951  s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
1952  9 + field_based,
1953  uvsrc_x, uvsrc_y << field_based,
1954  h_edge_pos >> 1, v_edge_pos >> 1);
1955  ptr_cb = uvbuf;
1956  ptr_cr = uvbuf + 16;
1957  }
1958  }
1959 
1960  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1961  if (bottom_field) {
1962  dest_y += s->linesize;
1963  dest_cb += s->uvlinesize;
1964  dest_cr += s->uvlinesize;
1965  }
1966 
1967  if (field_select) {
1968  ptr_y += s->linesize;
1969  ptr_cb += s->uvlinesize;
1970  ptr_cr += s->uvlinesize;
1971  }
1972 
1973  sx = (sx << 2) >> lowres;
1974  sy = (sy << 2) >> lowres;
1975  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1976 
1977  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1978  uvsx = (uvsx << 2) >> lowres;
1979  uvsy = (uvsy << 2) >> lowres;
1980  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
1981  uvsx, uvsy);
1982  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
1983  uvsx, uvsy);
1984  }
1985  // FIXME h261 lowres loop filter
1986 }
1987 
1989  uint8_t *dest_cb, uint8_t *dest_cr,
1990  uint8_t **ref_picture,
1991  h264_chroma_mc_func * pix_op,
1992  int mx, int my)
1993 {
1994  const int lowres = s->avctx->lowres;
1995  const int op_index = FFMIN(lowres, 2);
1996  const int block_s = 8 >> lowres;
1997  const int s_mask = (2 << lowres) - 1;
1998  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1999  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2000  int emu = 0, src_x, src_y, offset, sx, sy;
2001  uint8_t *ptr;
2002 
2003  if (s->quarter_sample) {
2004  mx /= 2;
2005  my /= 2;
2006  }
2007 
2008  /* In case of 8X8, we construct a single chroma motion vector
2009  with a special rounding */
2010  mx = ff_h263_round_chroma(mx);
2011  my = ff_h263_round_chroma(my);
2012 
2013  sx = mx & s_mask;
2014  sy = my & s_mask;
2015  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2016  src_y = s->mb_y * block_s + (my >> lowres + 1);
2017 
2018  offset = src_y * s->uvlinesize + src_x;
2019  ptr = ref_picture[1] + offset;
2020  if (s->flags & CODEC_FLAG_EMU_EDGE) {
2021  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2022  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2024  9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2025  ptr = s->edge_emu_buffer;
2026  emu = 1;
2027  }
2028  }
2029  sx = (sx << 2) >> lowres;
2030  sy = (sy << 2) >> lowres;
2031  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2032 
2033  ptr = ref_picture[2] + offset;
2034  if (emu) {
2035  s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2036  src_x, src_y, h_edge_pos, v_edge_pos);
2037  ptr = s->edge_emu_buffer;
2038  }
2039  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2040 }
2041 
/**
 * Perform motion compensation for one macroblock at reduced (lowres)
 * resolution, dispatching on s->mv_type.
 *
 * @param s           context
 * @param dest_y      luma destination pointer
 * @param dest_cb     chroma cb/u destination pointer
 * @param dest_cr     chroma cr/v destination pointer
 * @param dir         direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the reference planes
 * @param pix_op      halfpel motion compensation function (average or put)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;   /* block size shrinks with lowres */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses a single averaged vector below */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            /* accumulate for the combined chroma vector */
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* opposite parity of the same frame: reference current picture */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                /* same-frame opposite parity */
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: average predictions from both field parities */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
2183 
2188 {
2189  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2190  int my, off, i, mvs;
2191 
2192  if (s->picture_structure != PICT_FRAME) goto unhandled;
2193 
2194  switch (s->mv_type) {
2195  case MV_TYPE_16X16:
2196  mvs = 1;
2197  break;
2198  case MV_TYPE_16X8:
2199  mvs = 2;
2200  break;
2201  case MV_TYPE_8X8:
2202  mvs = 4;
2203  break;
2204  default:
2205  goto unhandled;
2206  }
2207 
2208  for (i = 0; i < mvs; i++) {
2209  my = s->mv[dir][i][1]<<qpel_shift;
2210  my_max = FFMAX(my_max, my);
2211  my_min = FFMIN(my_min, my);
2212  }
2213 
2214  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2215 
2216  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2217 unhandled:
2218  return s->mb_height-1;
2219 }
2220 
2221 /* put block[] to dest[] */
2222 static inline void put_dct(MpegEncContext *s,
2223  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2224 {
2225  s->dct_unquantize_intra(s, block, i, qscale);
2226  s->dsp.idct_put (dest, line_size, block);
2227 }
2228 
2229 /* add block[] to dest[] */
2230 static inline void add_dct(MpegEncContext *s,
2231  DCTELEM *block, int i, uint8_t *dest, int line_size)
2232 {
2233  if (s->block_last_index[i] >= 0) {
2234  s->dsp.idct_add (dest, line_size, block);
2235  }
2236 }
2237 
2238 static inline void add_dequant_dct(MpegEncContext *s,
2239  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2240 {
2241  if (s->block_last_index[i] >= 0) {
2242  s->dct_unquantize_inter(s, block, i, qscale);
2243 
2244  s->dsp.idct_add (dest, line_size, block);
2245  }
2246 }
2247 
2252 {
2253  int wrap = s->b8_stride;
2254  int xy = s->block_index[0];
2255 
2256  s->dc_val[0][xy ] =
2257  s->dc_val[0][xy + 1 ] =
2258  s->dc_val[0][xy + wrap] =
2259  s->dc_val[0][xy + 1 + wrap] = 1024;
2260  /* ac pred */
2261  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2262  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2263  if (s->msmpeg4_version>=3) {
2264  s->coded_block[xy ] =
2265  s->coded_block[xy + 1 ] =
2266  s->coded_block[xy + wrap] =
2267  s->coded_block[xy + 1 + wrap] = 0;
2268  }
2269  /* chroma */
2270  wrap = s->mb_stride;
2271  xy = s->mb_x + s->mb_y * wrap;
2272  s->dc_val[1][xy] =
2273  s->dc_val[2][xy] = 1024;
2274  /* ac pred */
2275  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2276  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2277 
2278  s->mbintra_table[xy]= 0;
2279 }
2280 
2281 /* generic function called after a macroblock has been parsed by the
2282  decoder or after it has been encoded by the encoder.
2283 
2284  Important variables used:
2285  s->mb_intra : true if intra macroblock
2286  s->mv_dir : motion vector direction
2287  s->mv_type : motion vector type
2288  s->mv : motion vector
2289  s->interlaced_dct : true if interlaced dct used (mpeg2)
2290  */
/*
 * NOTE(review): this listing is a Doxygen rendering with hard-coded line
 * numbers embedded; source lines that were hyperlinked in the HTML (e.g. the
 * function signature on original line 2292 and several `if` headers) were
 * dropped by the extraction.  Code below is left byte-identical; only
 * comments were added.
 *
 * Purpose (from the visible body): reconstruct one macroblock into
 * s->dest[] / scratch buffers — optional XvMC hand-off, DC-predictor and
 * mbskip bookkeeping, motion compensation (full-res or lowres path), then
 * residual addition via dequant+IDCT, plain IDCT-add, or intra IDCT-put.
 * is_mpeg12/lowres_flag are compile-time constants so the compiler can
 * specialize this always-inline worker (see MPV_decode_mb below).
 */
2291 static av_always_inline
2293  int lowres_flag, int is_mpeg12)
2294 {
2295  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
 /* NOTE(review): the guard for this early-out (presumably an XvMC
  * acceleration check, line 2296) was dropped by the extraction. */
2297  ff_xvmc_decode_mb(s);//xvmc uses pblocks
2298  return;
2299  }
2300 
2301  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2302  /* save DCT coefficients */
 /* Copy all 6 blocks (un-permuted back to raster order) into the frame's
  * dct_coeff array and dump them to the log. */
2303  int i,j;
2304  DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2305  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2306  for(i=0; i<6; i++){
2307  for(j=0; j<64; j++){
2308  *dct++ = block[i][s->dsp.idct_permutation[j]];
2309  av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2310  }
2311  av_log(s->avctx, AV_LOG_DEBUG, "\n");
2312  }
2313  }
2314 
2315  s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2316 
2317  /* update DC predictors for P macroblocks */
2318  if (!s->mb_intra) {
2319  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2320  if(s->mbintra_table[mb_xy])
 /* NOTE(review): the statement executed here (line 2321, presumably the
  * intra-table cleanup call) was dropped by the extraction. */
2322  } else {
 /* Reset luma/chroma DC predictors to the mid-point for non-AIC codecs. */
2323  s->last_dc[0] =
2324  s->last_dc[1] =
2325  s->last_dc[2] = 128 << s->intra_dc_precision;
2326  }
2327  }
2328  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2329  s->mbintra_table[mb_xy]=1;
2330 
 /* Pixel reconstruction is skipped when encoding I/B frames without RD
  * mb decision and without PSNR measurement — the reconstructed pixels
  * would never be referenced. */
2331  if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2332  uint8_t *dest_y, *dest_cb, *dest_cr;
2333  int dct_linesize, dct_offset;
2334  op_pixels_func (*op_pix)[4];
2335  qpel_mc_func (*op_qpix)[16];
2336  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2337  const int uvlinesize = s->current_picture.f.linesize[1];
2338  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2339  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2340 
2341  /* avoid copy if macroblock skipped in last frame too */
2342  /* skip only during decoding as we might trash the buffers during encoding a bit */
2343  if(!s->encoding){
2344  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2345 
2346  if (s->mb_skipped) {
2347  s->mb_skipped= 0;
2348  assert(s->pict_type!=AV_PICTURE_TYPE_I);
2349  *mbskip_ptr = 1;
2350  } else if(!s->current_picture.f.reference) {
2351  *mbskip_ptr = 1;
2352  } else{
2353  *mbskip_ptr = 0; /* not skipped */
2354  }
2355  }
2356 
 /* Interlaced DCT: blocks cover alternate lines, so double the stride and
  * start the bottom blocks one line (not one block row) down. */
2357  dct_linesize = linesize << s->interlaced_dct;
2358  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2359 
2360  if(readable){
2361  dest_y= s->dest[0];
2362  dest_cb= s->dest[1];
2363  dest_cr= s->dest[2];
2364  }else{
 /* B-frame pixels nobody will read directly: reconstruct into the
  * scratchpad and copy out at skip_idct below. */
2365  dest_y = s->b_scratchpad;
2366  dest_cb= s->b_scratchpad+16*linesize;
2367  dest_cr= s->b_scratchpad+32*linesize;
2368  }
2369 
2370  if (!s->mb_intra) {
2371  /* motion handling */
2372  /* decoding or more than one mb_type (MC was already done otherwise) */
2373  if(!s->encoding){
2374 
 /* NOTE(review): lines 2375/2377/2380 (the guard and the per-direction
  * statements — presumably frame-threading progress waits) were dropped
  * by the extraction. */
2376  if (s->mv_dir & MV_DIR_FORWARD) {
2378  }
2379  if (s->mv_dir & MV_DIR_BACKWARD) {
2381  }
2382  }
2383 
2384  if(lowres_flag){
 /* NOTE(review): line 2385 (initial op_pix assignment for the lowres
  * path, presumably the put h264 chroma table) was dropped. */
2386 
2387  if (s->mv_dir & MV_DIR_FORWARD) {
2388  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
 /* After forward MC, backward MC must average into the result. */
2389  op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2390  }
2391  if (s->mv_dir & MV_DIR_BACKWARD) {
2392  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2393  }
2394  }else{
2395  op_qpix= s->me.qpel_put;
2396  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2397  op_pix = s->dsp.put_pixels_tab;
2398  }else{
2399  op_pix = s->dsp.put_no_rnd_pixels_tab;
2400  }
2401  if (s->mv_dir & MV_DIR_FORWARD) {
2402  MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
 /* Bidirectional prediction: second direction averages. */
2403  op_pix = s->dsp.avg_pixels_tab;
2404  op_qpix= s->me.qpel_avg;
2405  }
2406  if (s->mv_dir & MV_DIR_BACKWARD) {
2407  MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2408  }
2409  }
2410  }
2411 
2412  /* skip dequant / idct if we are really late ;) */
2413  if(s->avctx->skip_idct){
 /* NOTE(review): lines 2414-2415 (the first conditions of this ||-chain,
  * presumably B-frame / non-reference discard checks) were dropped. */
2416  || s->avctx->skip_idct >= AVDISCARD_ALL)
2417  goto skip_idct;
2418  }
2419 
2420  /* add dct residue */
 /* NOTE(review): line 2421 (the first half of this condition, presumably
  * an encoding/codec check selecting the dequant-on-the-fly path) was
  * dropped by the extraction. */
2422  || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2423  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2424  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2425  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2426  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2427 
2428  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2429  if (s->chroma_y_shift){
2430  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2431  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2432  }else{
2433  dct_linesize >>= 1;
2434  dct_offset >>=1;
2435  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2436  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2437  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2438  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2439  }
2440  }
2441  } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
 /* Blocks were already dequantized: plain IDCT-and-add. */
2442  add_dct(s, block[0], 0, dest_y , dct_linesize);
2443  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2444  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2445  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2446 
2447  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2448  if(s->chroma_y_shift){//Chroma420
2449  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2450  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2451  }else{
2452  //chroma422
2453  dct_linesize = uvlinesize << s->interlaced_dct;
2454  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2455 
2456  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2457  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2458  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2459  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2460  if(!s->chroma_x_shift){//Chroma444
2461  add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2462  add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2463  add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2464  add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2465  }
2466  }
2467  }//fi gray
2468  }
 /* NOTE(review): line 2469 (the `else if` guard for the WMV2 special
  * case, presumably a CONFIG_WMV2 check) was dropped. */
2470  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2471  }
2472  } else {
2473  /* dct only in intra block */
 /* NOTE(review): line 2474 (the condition choosing dequant-put vs plain
  * idct_put) was dropped by the extraction. */
2475  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2476  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2477  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2478  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2479 
2480  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2481  if(s->chroma_y_shift){
2482  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2483  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2484  }else{
2485  dct_offset >>=1;
2486  dct_linesize >>=1;
2487  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2488  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2489  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2490  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2491  }
2492  }
2493  }else{
2494  s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2495  s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2496  s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2497  s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2498 
2499  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2500  if(s->chroma_y_shift){
2501  s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2502  s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2503  }else{
2504 
2505  dct_linesize = uvlinesize << s->interlaced_dct;
2506  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2507 
2508  s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2509  s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2510  s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2511  s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2512  if(!s->chroma_x_shift){//Chroma444
2513  s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2514  s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2515  s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2516  s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2517  }
2518  }
2519  }//gray
2520  }
2521  }
2522 skip_idct:
 /* Non-readable B-frame path: copy the scratchpad reconstruction into the
  * real destination planes. */
2523  if(!readable){
2524  s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2525  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2526  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2527  }
2528  }
2529 }
2530 
/*
 * Dispatch to the always-inline worker with compile-time-constant
 * lowres/is_mpeg12 flags so the compiler emits specialized variants
 * (the MPEG-1/2 specialization is only built when !CONFIG_SMALL).
 * NOTE(review): the function signature line (Doxygen line 2531) was dropped
 * by the extraction; only the body survives below.
 */
2532 #if !CONFIG_SMALL
2533  if(s->out_format == FMT_MPEG1) {
2534  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2535  else MPV_decode_mb_internal(s, block, 0, 1);
2536  } else
2537 #endif
2538  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2539  else MPV_decode_mb_internal(s, block, 0, 0);
2540 }
2541 
/*
 * Draw the edge padding around a just-decoded band of rows and invoke the
 * user's draw_horiz_band() callback, doubling y/h for field pictures.
 * NOTE(review): several hyperlinked lines were dropped by the extraction —
 * two conditions of the edge-drawing guard (2553, 2555), the declarations
 * that initialize hshift/vshift (2559-2560, presumably from the pixel format
 * descriptor), and the `if` headers on 2586 and 2593. Code below is left
 * byte-identical; only comments were added.
 */
2545 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2546  const int field_pic= s->picture_structure != PICT_FRAME;
2547  if(field_pic){
 /* Field pictures only cover every other frame line. */
2548  h <<= 1;
2549  y <<= 1;
2550  }
2551 
2552  if (!s->avctx->hwaccel
2554  && s->unrestricted_mv
2556  && !s->intra_only
2557  && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2558  int sides = 0, edge_h;
2561  if (y==0) sides |= EDGE_TOP;
2562  if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2563 
2564  edge_h= FFMIN(h, s->v_edge_pos - y);
2565 
 /* Replicate border pixels into the EDGE_WIDTH margin for luma and both
  * chroma planes, so unrestricted MVs can reference outside the frame. */
2566  s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2567  s->linesize, s->h_edge_pos, edge_h,
2568  EDGE_WIDTH, EDGE_WIDTH, sides);
2569  s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2570  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2571  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2572  s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2573  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2574  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2575  }
2576 
2577  h= FFMIN(h, s->avctx->height - y);
2578 
 /* Wait for the second field unless the user explicitly allows per-field
  * callbacks. */
2579  if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2580 
2581  if (s->avctx->draw_horiz_band) {
2582  AVFrame *src;
2583  int offset[AV_NUM_DATA_POINTERS];
2584  int i;
2585 
 /* NOTE(review): the `if` on line 2586 (choosing current vs last picture,
  * presumably by picture type / output order) was dropped. */
2587  src= (AVFrame*)s->current_picture_ptr;
2588  else if(s->last_picture_ptr)
2589  src= (AVFrame*)s->last_picture_ptr;
2590  else
2591  return;
2592 
 /* NOTE(review): the `if` on line 2593 (zero-offset case, presumably for
  * B pictures / SLICE_FLAG_CODED_ORDER) was dropped. */
2594  for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2595  offset[i] = 0;
2596  }else{
2597  offset[0]= y * s->linesize;
2598  offset[1]=
2599  offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2600  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2601  offset[i] = 0;
2602  }
2603 
 /* Leave the FPU in a sane state before calling user code (x86 MMX). */
2604  emms_c();
2605 
2606  s->avctx->draw_horiz_band(s->avctx, src, offset,
2607  y, s->picture_structure, h);
2608  }
2609 }
2610 
2611 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
 /* Compute per-macroblock block indices and destination plane pointers for
  * the MB at (s->mb_x, s->mb_y); mb_size is log2 of the MB width (reduced
  * by lowres). */
2612  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2613  const int uvlinesize = s->current_picture.f.linesize[1];
2614  const int mb_size= 4 - s->avctx->lowres;
2615 
 /* Four luma 8x8 block indices (2x2 grid) in b8_stride units, then the two
  * chroma block indices stored after the luma area. */
2616  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2617  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2618  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2619  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2620  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2621  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2622  //block_index is not used by mpeg2, so it is not affected by chroma_format
2623 
2624  s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2625  s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2626  s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2627 
 /* NOTE(review): the guard condition on line 2628 (controlling whether the
  * vertical offset below is applied) was dropped by the extraction. */
2629  {
2630  if(s->picture_structure==PICT_FRAME){
2631  s->dest[0] += s->mb_y * linesize << mb_size;
2632  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2633  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2634  }else{
 /* Field picture: rows interleave, so halve the MB row. */
2635  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2636  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2637  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2638  assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2639  }
2640  }
2641 }
2642 
/*
 * Flush/reset decoder state on seek: release all internal/user picture
 * buffers and reset parser and bitstream bookkeeping.
 * NOTE(review): the function signature (Doxygen line 2643, presumably
 * `void ff_mpeg_flush(AVCodecContext *avctx)`) and a few statements
 * (2656, 2661, 2663 — additional state resets) were dropped by the
 * extraction. Code below is left byte-identical.
 */
2644  int i;
2645  MpegEncContext *s = avctx->priv_data;
2646 
2647  if(s==NULL || s->picture==NULL)
2648  return;
2649 
2650  for(i=0; i<s->picture_count; i++){
2651  if (s->picture[i].f.data[0] &&
2652  (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2653  s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2654  free_frame_buffer(s, &s->picture[i]);
2655  }
2657 
2658  s->mb_x= s->mb_y= 0;
2659 
2660  s->parse_context.state= -1;
2662  s->parse_context.overread= 0;
2664  s->parse_context.index= 0;
2665  s->parse_context.last_index= 0;
2666  s->bitstream_buffer_size=0;
2667  s->pp_time=0;
2668 }
2669 
/*
 * MPEG-1 intra dequantization (see the prototype at the top of the file:
 * dct_unquantize_mpeg1_intra_c).  The DC coefficient is scaled by the
 * luma/chroma DC scale; AC coefficients are multiplied by qscale and the
 * intra matrix, then forced odd ("(level - 1) | 1" oddification per the
 * MPEG-1 spec).  NOTE(review): the first signature line (Doxygen 2670) was
 * dropped by the extraction.
 */
2671  DCTELEM *block, int n, int qscale)
2672 {
2673  int i, level, nCoeffs;
2674  const uint16_t *quant_matrix;
2675 
2676  nCoeffs= s->block_last_index[n];
2677 
 /* Blocks 0-3 are luma, 4+ chroma. */
2678  if (n < 4)
2679  block[0] = block[0] * s->y_dc_scale;
2680  else
2681  block[0] = block[0] * s->c_dc_scale;
2682  /* XXX: only mpeg1 */
2683  quant_matrix = s->intra_matrix;
2684  for(i=1;i<=nCoeffs;i++) {
2685  int j= s->intra_scantable.permutated[i];
2686  level = block[j];
2687  if (level) {
 /* Negative levels: dequantize the magnitude, then restore sign, so
  * rounding is symmetric around zero. */
2688  if (level < 0) {
2689  level = -level;
2690  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2691  level = (level - 1) | 1;
2692  level = -level;
2693  } else {
2694  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2695  level = (level - 1) | 1;
2696  }
2697  block[j] = level;
2698  }
2699  }
2700 }
2701 
/*
 * MPEG-1 inter dequantization (prototype: dct_unquantize_mpeg1_inter_c).
 * Uses the reconstruction (2*level + 1) * qscale * matrix / 16 with
 * oddification, starting at coefficient 0 (inter blocks have no special
 * DC handling).  NOTE(review): the first signature line (Doxygen 2702) was
 * dropped by the extraction.  Using intra_scantable.permutated here appears
 * intentional — presumably the permuted scan order is shared between
 * intra and inter; verify against the scantable setup.
 */
2703  DCTELEM *block, int n, int qscale)
2704 {
2705  int i, level, nCoeffs;
2706  const uint16_t *quant_matrix;
2707 
2708  nCoeffs= s->block_last_index[n];
2709 
2710  quant_matrix = s->inter_matrix;
2711  for(i=0; i<=nCoeffs; i++) {
2712  int j= s->intra_scantable.permutated[i];
2713  level = block[j];
2714  if (level) {
2715  if (level < 0) {
2716  level = -level;
2717  level = (((level << 1) + 1) * qscale *
2718  ((int) (quant_matrix[j]))) >> 4;
 /* Force the result odd (MPEG-1 oddification). */
2719  level = (level - 1) | 1;
2720  level = -level;
2721  } else {
2722  level = (((level << 1) + 1) * qscale *
2723  ((int) (quant_matrix[j]))) >> 4;
2724  level = (level - 1) | 1;
2725  }
2726  block[j] = level;
2727  }
2728  }
2729 }
2730 
/*
 * MPEG-2 intra dequantization (prototype: dct_unquantize_mpeg2_intra_c).
 * Same as the MPEG-1 intra version but without oddification (MPEG-2 uses
 * mismatch control instead — see the _bitexact variant below), and with
 * all 64 coefficients processed when alternate_scan is set.
 * NOTE(review): the first signature line (Doxygen 2731) was dropped by the
 * extraction.
 */
2732  DCTELEM *block, int n, int qscale)
2733 {
2734  int i, level, nCoeffs;
2735  const uint16_t *quant_matrix;
2736 
2737  if(s->alternate_scan) nCoeffs= 63;
2738  else nCoeffs= s->block_last_index[n];
2739 
 /* Blocks 0-3 are luma, 4+ chroma. */
2740  if (n < 4)
2741  block[0] = block[0] * s->y_dc_scale;
2742  else
2743  block[0] = block[0] * s->c_dc_scale;
2744  quant_matrix = s->intra_matrix;
2745  for(i=1;i<=nCoeffs;i++) {
2746  int j= s->intra_scantable.permutated[i];
2747  level = block[j];
2748  if (level) {
2749  if (level < 0) {
2750  level = -level;
2751  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2752  level = -level;
2753  } else {
2754  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2755  }
2756  block[j] = level;
2757  }
2758  }
2759 }
2760 
/*
 * Bit-exact MPEG-2 intra dequantization (prototype:
 * dct_unquantize_mpeg2_intra_bitexact).  Identical to the non-bitexact
 * variant above, plus the spec's mismatch control: the parity of the sum of
 * all reconstructed coefficients is folded into block[63] at the end.
 * NOTE(review): the first signature line (Doxygen 2761) was dropped by the
 * extraction.
 */
2762  DCTELEM *block, int n, int qscale)
2763 {
2764  int i, level, nCoeffs;
2765  const uint16_t *quant_matrix;
 /* Start at -1 so the DC term (counted implicitly) gives the correct
  * parity after the loop. */
2766  int sum=-1;
2767 
2768  if(s->alternate_scan) nCoeffs= 63;
2769  else nCoeffs= s->block_last_index[n];
2770 
2771  if (n < 4)
2772  block[0] = block[0] * s->y_dc_scale;
2773  else
2774  block[0] = block[0] * s->c_dc_scale;
2775  quant_matrix = s->intra_matrix;
2776  for(i=1;i<=nCoeffs;i++) {
2777  int j= s->intra_scantable.permutated[i];
2778  level = block[j];
2779  if (level) {
2780  if (level < 0) {
2781  level = -level;
2782  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2783  level = -level;
2784  } else {
2785  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2786  }
2787  block[j] = level;
2788  sum+=level;
2789  }
2790  }
 /* MPEG-2 mismatch control: toggle the LSB of the last coefficient so the
  * coefficient sum is odd. */
2791  block[63]^=sum&1;
2792 }
2793 
/*
 * MPEG-2 inter dequantization (prototype: dct_unquantize_mpeg2_inter_c):
 * (2*level + 1) * qscale * matrix / 16 with sign-symmetric rounding and
 * MPEG-2 mismatch control on block[63].
 * NOTE(review): the first signature line (Doxygen 2794) was dropped by the
 * extraction.  As in the MPEG-1 inter version, intra_scantable.permutated
 * is used — presumably the permuted scan order is shared; verify.
 */
2795  DCTELEM *block, int n, int qscale)
2796 {
2797  int i, level, nCoeffs;
2798  const uint16_t *quant_matrix;
2799  int sum=-1;
2800 
2801  if(s->alternate_scan) nCoeffs= 63;
2802  else nCoeffs= s->block_last_index[n];
2803 
2804  quant_matrix = s->inter_matrix;
2805  for(i=0; i<=nCoeffs; i++) {
2806  int j= s->intra_scantable.permutated[i];
2807  level = block[j];
2808  if (level) {
2809  if (level < 0) {
2810  level = -level;
2811  level = (((level << 1) + 1) * qscale *
2812  ((int) (quant_matrix[j]))) >> 4;
2813  level = -level;
2814  } else {
2815  level = (((level << 1) + 1) * qscale *
2816  ((int) (quant_matrix[j]))) >> 4;
2817  }
2818  block[j] = level;
2819  sum+=level;
2820  }
2821  }
 /* Mismatch control: force odd coefficient-sum parity via block[63]. */
2822  block[63]^=sum&1;
2823 }
2824 
/*
 * H.263 intra dequantization (prototype: dct_unquantize_h263_intra_c):
 * level' = level * 2*qscale +/- qadd, where qadd is the odd value
 * (qscale-1)|1, or 0 in AIC mode (which also skips the DC scaling).
 * With AC prediction all 63 AC coefficients are processed; otherwise only
 * up to the raster-order position of the last nonzero coefficient.
 * NOTE(review): the first signature line (Doxygen 2825) was dropped by the
 * extraction.
 */
2826  DCTELEM *block, int n, int qscale)
2827 {
2828  int i, level, qmul, qadd;
2829  int nCoeffs;
2830 
2831  assert(s->block_last_index[n]>=0);
2832 
2833  qmul = qscale << 1;
2834 
2835  if (!s->h263_aic) {
2836  if (n < 4)
2837  block[0] = block[0] * s->y_dc_scale;
2838  else
2839  block[0] = block[0] * s->c_dc_scale;
2840  qadd = (qscale - 1) | 1;
2841  }else{
 /* AIC: DC is predicted separately, no qadd offset. */
2842  qadd = 0;
2843  }
2844  if(s->ac_pred)
2845  nCoeffs=63;
2846  else
2847  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2848 
 /* H.263 blocks are stored in raster order here, so no scan permutation. */
2849  for(i=1; i<=nCoeffs; i++) {
2850  level = block[i];
2851  if (level) {
2852  if (level < 0) {
2853  level = level * qmul - qadd;
2854  } else {
2855  level = level * qmul + qadd;
2856  }
2857  block[i] = level;
2858  }
2859  }
2860 }
2861 
/*
 * H.263 inter dequantization (prototype: dct_unquantize_h263_inter_c):
 * same qmul/qadd reconstruction as the intra version but with no DC
 * special-case — the loop starts at coefficient 0.
 * NOTE(review): the first signature line (Doxygen 2862) was dropped by the
 * extraction.
 */
2863  DCTELEM *block, int n, int qscale)
2864 {
2865  int i, level, qmul, qadd;
2866  int nCoeffs;
2867 
2868  assert(s->block_last_index[n]>=0);
2869 
2870  qadd = (qscale - 1) | 1;
2871  qmul = qscale << 1;
2872 
2873  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2874 
2875  for(i=0; i<=nCoeffs; i++) {
2876  level = block[i];
2877  if (level) {
2878  if (level < 0) {
2879  level = level * qmul - qadd;
2880  } else {
2881  level = level * qmul + qadd;
2882  }
2883  block[i] = level;
2884  }
2885  }
2886 }
2887 
/*
 * Set a new quantizer, clamped to [1, 31], and update the derived chroma
 * qscale and DC scale values.
 * NOTE(review): line 2902 (presumably the matching c_dc_scale update from
 * c_dc_scale_table) was dropped by the extraction.
 */
2891 void ff_set_qscale(MpegEncContext * s, int qscale)
2892 {
2893  if (qscale < 1)
2894  qscale = 1;
2895  else if (qscale > 31)
2896  qscale = 31;
2897 
2898  s->qscale = qscale;
2899  s->chroma_qscale= s->chroma_qscale_table[qscale];
2900 
2901  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2903 }
2904 
2906 {
2909 }