mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43 
44 //#undef NDEBUG
45 //#include <assert.h>
46 
48  DCTELEM *block, int n, int qscale);
50  DCTELEM *block, int n, int qscale);
52  DCTELEM *block, int n, int qscale);
54  DCTELEM *block, int n, int qscale);
56  DCTELEM *block, int n, int qscale);
58  DCTELEM *block, int n, int qscale);
60  DCTELEM *block, int n, int qscale);
61 
62 
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
65 
66 //#define DEBUG
67 
68 
/* Default luma->chroma qscale mapping: the identity mapping, i.e. chroma
 * uses the same quantizer scale as luma for all 32 possible values. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,
     8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23,
    24, 25, 26, 27, 28, 29, 30, 31
};
74 
/* MPEG-1 DC scale table: the DC coefficient scale factor is a constant 8
 * regardless of the quantizer scale, hence 128 identical entries. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
};
86 
/* MPEG-2 DC scale table for intra_dc_precision giving a divisor of 4;
 * constant for every quantizer scale value. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
};
98 
/* MPEG-2 DC scale table for intra_dc_precision giving a divisor of 2;
 * constant for every quantizer scale value. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
};
110 
/* MPEG-2 DC scale table for intra_dc_precision giving a divisor of 1
 * (no scaling); constant for every quantizer scale value. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
};
122 
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
128 };
129 
133 };
134 
141 };
142 
/**
 * Scan [p, end) for an MPEG start code prefix (00 00 01 xx).
 *
 * *state carries the last bytes seen (big-endian packed) across calls, so
 * a start code that straddles two input buffers is still found.
 *
 * Returns a pointer one byte past the xx byte of the first start code
 * found, with *state == 0x000001xx. If no start code is found, returns
 * end, with *state updated so the search can resume on the next buffer.
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to 3 bytes through *state first, to catch a start code whose
     * 00 00 01 prefix began in the previous buffer. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Main scan: use the bytes behind p to rule out a 00 00 01 pattern and
     * step forward by up to 3 bytes at a time. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3; /* last byte > 1: cannot be part of a prefix */
        else if (p[-2]          ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {                            /* p[-3..-1] == 00 00 01: prefix found */
            p++;
            break;
        }
    }

    /* Reload *state with the 4 bytes ending at the match (or at end) and
     * return the position just past them. */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
175 
176 /* init common dct for both encoder and decoder */
178 {
179  dsputil_init(&s->dsp, s->avctx);
180 
186  if (s->flags & CODEC_FLAG_BITEXACT)
189 
190 #if HAVE_MMX
192 #elif ARCH_ALPHA
194 #elif CONFIG_MLIB
196 #elif HAVE_MMI
198 #elif ARCH_ARM
200 #elif HAVE_ALTIVEC
202 #elif ARCH_BFIN
204 #endif
205 
206  /* load & permutate scantables
207  * note: only wmv uses different ones
208  */
209  if (s->alternate_scan) {
212  } else {
215  }
218 
219  return 0;
220 }
221 
223 {
224  *dst = *src;
225  dst->f.type = FF_BUFFER_TYPE_COPY;
226 }
227 
232 {
233  /* Windows Media Image codecs allocate internal buffers with different
234  * dimensions; ignore user defined callbacks for these
235  */
238  else
241 }
242 
247 {
248  int r;
249 
250  if (s->avctx->hwaccel) {
251  assert(!pic->f.hwaccel_picture_private);
252  if (s->avctx->hwaccel->priv_data_size) {
254  if (!pic->f.hwaccel_picture_private) {
255  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
256  return -1;
257  }
258  }
259  }
260 
262  r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
263  else
264  r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
265 
266  if (r < 0 || !pic->f.type || !pic->f.data[0]) {
267  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
268  r, pic->f.type, pic->f.data[0]);
270  return -1;
271  }
272 
273  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
274  s->uvlinesize != pic->f.linesize[1])) {
276  "get_buffer() failed (stride changed)\n");
277  free_frame_buffer(s, pic);
278  return -1;
279  }
280 
281  if (pic->f.linesize[1] != pic->f.linesize[2]) {
283  "get_buffer() failed (uv stride mismatch)\n");
284  free_frame_buffer(s, pic);
285  return -1;
286  }
287 
288  return 0;
289 }
290 
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 {
297  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298 
299  // the + 1 is needed so memset(,,stride*height) does not sig11
300 
301  const int mb_array_size = s->mb_stride * s->mb_height;
302  const int b8_array_size = s->b8_stride * s->mb_height * 2;
303  const int b4_array_size = s->b4_stride * s->mb_height * 4;
304  int i;
305  int r = -1;
306 
307  if (shared) {
308  assert(pic->f.data[0]);
309  assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
311  } else {
312  assert(!pic->f.data[0]);
313 
314  if (alloc_frame_buffer(s, pic) < 0)
315  return -1;
316 
317  s->linesize = pic->f.linesize[0];
318  s->uvlinesize = pic->f.linesize[1];
319  }
320 
321  if (pic->f.qscale_table == NULL) {
322  if (s->encoding) {
323  FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324  mb_array_size * sizeof(int16_t), fail)
326  mb_array_size * sizeof(int16_t), fail)
328  mb_array_size * sizeof(int8_t ), fail)
329  }
330 
332  mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
334  (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335  fail)
337  (big_mb_num + s->mb_stride) * sizeof(uint32_t),
338  fail)
339  pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340  pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
341  if (s->out_format == FMT_H264) {
342  for (i = 0; i < 2; i++) {
344  2 * (b4_array_size + 4) * sizeof(int16_t),
345  fail)
346  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348  4 * mb_array_size * sizeof(uint8_t), fail)
349  }
350  pic->f.motion_subsample_log2 = 2;
351  } else if (s->out_format == FMT_H263 || s->encoding ||
352  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353  for (i = 0; i < 2; i++) {
355  2 * (b8_array_size + 4) * sizeof(int16_t),
356  fail)
357  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359  4 * mb_array_size * sizeof(uint8_t), fail)
360  }
361  pic->f.motion_subsample_log2 = 3;
362  }
363  if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
365  64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366  }
367  pic->f.qstride = s->mb_stride;
369  1 * sizeof(AVPanScan), fail)
370  }
371 
372  pic->owner2 = s;
373 
374  return 0;
375 fail: // for the FF_ALLOCZ_OR_GOTO macro
376  if (r >= 0)
377  free_frame_buffer(s, pic);
378  return -1;
379 }
380 
/**
 * Deallocate a Picture: release its frame buffer (unless the buffer is
 * shared, i.e. owned by the caller) and free all per-picture side tables.
 */
static void free_picture(MpegEncContext *s, Picture *pic)
{
    int i;

    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    pic->f.mb_type = NULL; /* pointed into mb_type_base, which was just freed */
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
    }

    /* A shared buffer belongs to the caller: drop our pointers to it
     * instead of freeing the data. */
    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.base[i] =
            pic->f.data[i] = NULL;
        }
        pic->f.type = 0;
    }
}
414 
416 {
417  int y_size = s->b8_stride * (2 * s->mb_height + 1);
418  int c_size = s->mb_stride * (s->mb_height + 1);
419  int yc_size = y_size + 2 * c_size;
420  int i;
421 
422  // edge emu needs blocksize + filter length - 1
423  // (= 17x17 for halfpel / 21x21 for h264)
425  (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
426 
427  // FIXME should be linesize instead of s->width * 2
428  // but that is not known before get_buffer()
430  (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
431  s->me.temp = s->me.scratchpad;
432  s->rd_scratchpad = s->me.scratchpad;
433  s->b_scratchpad = s->me.scratchpad;
434  s->obmc_scratchpad = s->me.scratchpad + 16;
435  if (s->encoding) {
436  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
437  ME_MAP_SIZE * sizeof(uint32_t), fail)
439  ME_MAP_SIZE * sizeof(uint32_t), fail)
440  if (s->avctx->noise_reduction) {
442  2 * 64 * sizeof(int), fail)
443  }
444  }
445  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
446  s->block = s->blocks[0];
447 
448  for (i = 0; i < 12; i++) {
449  s->pblocks[i] = &s->block[i];
450  }
451 
452  if (s->out_format == FMT_H263) {
453  /* ac values */
455  yc_size * sizeof(int16_t) * 16, fail);
456  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
457  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
458  s->ac_val[2] = s->ac_val[1] + c_size;
459  }
460 
461  return 0;
462 fail:
463  return -1; // free() through MPV_common_end()
464 }
465 
467 {
468  if (s == NULL)
469  return;
470 
472  av_freep(&s->me.scratchpad);
473  s->me.temp =
474  s->rd_scratchpad =
475  s->b_scratchpad =
476  s->obmc_scratchpad = NULL;
477 
478  av_freep(&s->dct_error_sum);
479  av_freep(&s->me.map);
480  av_freep(&s->me.score_map);
481  av_freep(&s->blocks);
482  av_freep(&s->ac_val_base);
483  s->block = NULL;
484 }
485 
487 {
488 #define COPY(a) bak->a = src->a
489  COPY(edge_emu_buffer);
490  COPY(me.scratchpad);
491  COPY(me.temp);
492  COPY(rd_scratchpad);
493  COPY(b_scratchpad);
494  COPY(obmc_scratchpad);
495  COPY(me.map);
496  COPY(me.score_map);
497  COPY(blocks);
498  COPY(block);
499  COPY(start_mb_y);
500  COPY(end_mb_y);
501  COPY(me.map_generation);
502  COPY(pb);
503  COPY(dct_error_sum);
504  COPY(dct_count[0]);
505  COPY(dct_count[1]);
506  COPY(ac_val_base);
507  COPY(ac_val[0]);
508  COPY(ac_val[1]);
509  COPY(ac_val[2]);
510 #undef COPY
511 }
512 
514 {
515  MpegEncContext bak;
516  int i;
517  // FIXME copy only needed parts
518  // START_TIMER
519  backup_duplicate_context(&bak, dst);
520  memcpy(dst, src, sizeof(MpegEncContext));
521  backup_duplicate_context(dst, &bak);
522  for (i = 0; i < 12; i++) {
523  dst->pblocks[i] = &dst->block[i];
524  }
525  // STOP_TIMER("update_duplicate_context")
526  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
527 }
528 
530  const AVCodecContext *src)
531 {
532  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
533 
534  if (dst == src || !s1->context_initialized)
535  return 0;
536 
537  // FIXME can parameters change on I-frames?
538  // in that case dst may need a reinit
539  if (!s->context_initialized) {
540  memcpy(s, s1, sizeof(MpegEncContext));
541 
542  s->avctx = dst;
545  s->bitstream_buffer = NULL;
547 
548  MPV_common_init(s);
549  }
550 
551  s->avctx->coded_height = s1->avctx->coded_height;
552  s->avctx->coded_width = s1->avctx->coded_width;
553  s->avctx->width = s1->avctx->width;
554  s->avctx->height = s1->avctx->height;
555 
556  s->coded_picture_number = s1->coded_picture_number;
557  s->picture_number = s1->picture_number;
558  s->input_picture_number = s1->input_picture_number;
559 
560  memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
561  memcpy(&s->last_picture, &s1->last_picture,
562  (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
563 
564  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
565  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
566  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
567 
568  // Error/bug resilience
569  s->next_p_frame_damaged = s1->next_p_frame_damaged;
570  s->workaround_bugs = s1->workaround_bugs;
571 
572  // MPEG4 timing info
573  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
574  (char *) &s1->shape - (char *) &s1->time_increment_bits);
575 
576  // B-frame info
577  s->max_b_frames = s1->max_b_frames;
578  s->low_delay = s1->low_delay;
579  s->dropable = s1->dropable;
580 
581  // DivX handling (doesn't work)
582  s->divx_packed = s1->divx_packed;
583 
584  if (s1->bitstream_buffer) {
585  if (s1->bitstream_buffer_size +
589  s1->allocated_bitstream_buffer_size);
590  s->bitstream_buffer_size = s1->bitstream_buffer_size;
591  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
592  s1->bitstream_buffer_size);
593  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
595  }
596 
597  // MPEG2/interlacing info
598  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
599  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
600 
601  if (!s1->first_field) {
602  s->last_pict_type = s1->pict_type;
603  if (s1->current_picture_ptr)
604  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
605 
606  if (s1->pict_type != AV_PICTURE_TYPE_B) {
607  s->last_non_b_pict_type = s1->pict_type;
608  }
609  }
610 
611  return 0;
612 }
613 
621 {
622  s->y_dc_scale_table =
625  s->progressive_frame = 1;
626  s->progressive_sequence = 1;
628 
629  s->coded_picture_number = 0;
630  s->picture_number = 0;
631  s->input_picture_number = 0;
632 
633  s->picture_in_gop_number = 0;
634 
635  s->f_code = 1;
636  s->b_code = 1;
637 
638  s->picture_range_start = 0;
640 
641  s->slice_context_count = 1;
642 }
643 
650 {
652 }
653 
659 {
660  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
661  int nb_slices = (HAVE_THREADS &&
663  s->avctx->thread_count : 1;
664 
665  if (s->encoding && s->avctx->slices)
666  nb_slices = s->avctx->slices;
667 
669  s->mb_height = (s->height + 31) / 32 * 2;
670  else if (s->codec_id != CODEC_ID_H264)
671  s->mb_height = (s->height + 15) / 16;
672 
673  if (s->avctx->pix_fmt == PIX_FMT_NONE) {
675  "decoding to PIX_FMT_NONE is not supported.\n");
676  return -1;
677  }
678 
679  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
680  int max_slices;
681  if (s->mb_height)
682  max_slices = FFMIN(MAX_THREADS, s->mb_height);
683  else
684  max_slices = MAX_THREADS;
685  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
686  " reducing to %d\n", nb_slices, max_slices);
687  nb_slices = max_slices;
688  }
689 
690  if ((s->width || s->height) &&
691  av_image_check_size(s->width, s->height, 0, s->avctx))
692  return -1;
693 
695 
696  s->flags = s->avctx->flags;
697  s->flags2 = s->avctx->flags2;
698 
699  /* set chroma shifts */
701  &s->chroma_y_shift);
702 
703  /* convert fourcc to upper case */
705 
707 
708  if (s->width && s->height) {
709  s->mb_width = (s->width + 15) / 16;
710  s->mb_stride = s->mb_width + 1;
711  s->b8_stride = s->mb_width * 2 + 1;
712  s->b4_stride = s->mb_width * 4 + 1;
713  mb_array_size = s->mb_height * s->mb_stride;
714  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
715 
716  /* set default edge pos, will be overriden
717  * in decode_header if needed */
718  s->h_edge_pos = s->mb_width * 16;
719  s->v_edge_pos = s->mb_height * 16;
720 
721  s->mb_num = s->mb_width * s->mb_height;
722 
723  s->block_wrap[0] =
724  s->block_wrap[1] =
725  s->block_wrap[2] =
726  s->block_wrap[3] = s->b8_stride;
727  s->block_wrap[4] =
728  s->block_wrap[5] = s->mb_stride;
729 
730  y_size = s->b8_stride * (2 * s->mb_height + 1);
731  c_size = s->mb_stride * (s->mb_height + 1);
732  yc_size = y_size + 2 * c_size;
733 
735 
736  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
737  fail); // error ressilience code looks cleaner with this
738  for (y = 0; y < s->mb_height; y++)
739  for (x = 0; x < s->mb_width; x++)
740  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
741 
742  s->mb_index2xy[s->mb_height * s->mb_width] =
743  (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
744 
745  if (s->encoding) {
746  /* Allocate MV tables */
748  mv_table_size * 2 * sizeof(int16_t), fail);
750  mv_table_size * 2 * sizeof(int16_t), fail);
752  mv_table_size * 2 * sizeof(int16_t), fail);
754  mv_table_size * 2 * sizeof(int16_t), fail);
756  mv_table_size * 2 * sizeof(int16_t), fail);
758  mv_table_size * 2 * sizeof(int16_t), fail);
759  s->p_mv_table = s->p_mv_table_base +
760  s->mb_stride + 1;
762  s->mb_stride + 1;
764  s->mb_stride + 1;
766  s->mb_stride + 1;
768  s->mb_stride + 1;
770  s->mb_stride + 1;
771 
772  if (s->msmpeg4_version) {
774  2 * 2 * (MAX_LEVEL + 1) *
775  (MAX_RUN + 1) * 2 * sizeof(int), fail);
776  }
777  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
778 
779  /* Allocate MB type table */
780  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
781  sizeof(uint16_t), fail); // needed for encoding
782 
783  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
784  sizeof(int), fail);
785 
787  64 * 32 * sizeof(int), fail);
789  64 * 32 * sizeof(int), fail);
791  64 * 32 * 2 * sizeof(uint16_t), fail);
793  64 * 32 * 2 * sizeof(uint16_t), fail);
795  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
798 
799  if (s->avctx->noise_reduction) {
801  2 * 64 * sizeof(uint16_t), fail);
802  }
803  }
804  }
805 
808  s->picture_count * sizeof(Picture), fail);
809  for (i = 0; i < s->picture_count; i++) {
811  }
812 
813  if (s->width && s->height) {
815  mb_array_size * sizeof(uint8_t), fail);
816 
817  if (s->codec_id == CODEC_ID_MPEG4 ||
819  /* interlaced direct mode decoding tables */
820  for (i = 0; i < 2; i++) {
821  int j, k;
822  for (j = 0; j < 2; j++) {
823  for (k = 0; k < 2; k++) {
825  s->b_field_mv_table_base[i][j][k],
826  mv_table_size * 2 * sizeof(int16_t),
827  fail);
828  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
829  s->mb_stride + 1;
830  }
832  mb_array_size * 2 * sizeof(uint8_t),
833  fail);
835  mv_table_size * 2 * sizeof(int16_t),
836  fail);
837  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
838  + s->mb_stride + 1;
839  }
841  mb_array_size * 2 * sizeof(uint8_t),
842  fail);
843  }
844  }
845  if (s->out_format == FMT_H263) {
846  /* cbp values */
847  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
848  s->coded_block = s->coded_block_base + s->b8_stride + 1;
849 
850  /* cbp, ac_pred, pred_dir */
852  mb_array_size * sizeof(uint8_t), fail);
854  mb_array_size * sizeof(uint8_t), fail);
855  }
856 
857  if (s->h263_pred || s->h263_plus || !s->encoding) {
858  /* dc values */
859  // MN: we need these for error resilience of intra-frames
861  yc_size * sizeof(int16_t), fail);
862  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
863  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
864  s->dc_val[2] = s->dc_val[1] + c_size;
865  for (i = 0; i < yc_size; i++)
866  s->dc_val_base[i] = 1024;
867  }
868 
869  /* which mb is a intra block */
870  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
871  memset(s->mbintra_table, 1, mb_array_size);
872 
873  /* init macroblock skip table */
874  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
875  // Note the + 1 is for a quicker mpeg4 slice_end detection
876 
877  s->parse_context.state = -1;
879  s->avctx->debug_mv) {
880  s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
881  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
882  s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
883  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
884  s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
885  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
886  }
887  }
888 
889  s->context_initialized = 1;
890  s->thread_context[0] = s;
891 
892  if (s->width && s->height) {
893  if (nb_slices > 1) {
894  for (i = 1; i < nb_slices; i++) {
895  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
896  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
897  }
898 
899  for (i = 0; i < nb_slices; i++) {
900  if (init_duplicate_context(s->thread_context[i], s) < 0)
901  goto fail;
902  s->thread_context[i]->start_mb_y =
903  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
904  s->thread_context[i]->end_mb_y =
905  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
906  }
907  } else {
908  if (init_duplicate_context(s, s) < 0)
909  goto fail;
910  s->start_mb_y = 0;
911  s->end_mb_y = s->mb_height;
912  }
913  s->slice_context_count = nb_slices;
914  }
915 
916  return 0;
917  fail:
918  MPV_common_end(s);
919  return -1;
920 }
921 
922 /* init common structure for both encoder and decoder */
924 {
925  int i, j, k;
926 
927  if (s->slice_context_count > 1) {
928  for (i = 0; i < s->slice_context_count; i++) {
930  }
931  for (i = 1; i < s->slice_context_count; i++) {
932  av_freep(&s->thread_context[i]);
933  }
934  s->slice_context_count = 1;
935  } else free_duplicate_context(s);
936 
938  s->parse_context.buffer_size = 0;
939 
940  av_freep(&s->mb_type);
947  s->p_mv_table = NULL;
948  s->b_forw_mv_table = NULL;
949  s->b_back_mv_table = NULL;
952  s->b_direct_mv_table = NULL;
953  for (i = 0; i < 2; i++) {
954  for (j = 0; j < 2; j++) {
955  for (k = 0; k < 2; k++) {
956  av_freep(&s->b_field_mv_table_base[i][j][k]);
957  s->b_field_mv_table[i][j][k] = NULL;
958  }
959  av_freep(&s->b_field_select_table[i][j]);
960  av_freep(&s->p_field_mv_table_base[i][j]);
961  s->p_field_mv_table[i][j] = NULL;
962  }
964  }
965 
966  av_freep(&s->dc_val_base);
968  av_freep(&s->mbintra_table);
969  av_freep(&s->cbp_table);
971 
972  av_freep(&s->mbskip_table);
975 
976  av_freep(&s->avctx->stats_out);
977  av_freep(&s->ac_stats);
979  av_freep(&s->mb_index2xy);
980  av_freep(&s->lambda_table);
985  av_freep(&s->input_picture);
987  av_freep(&s->dct_offset);
988 
989  if (s->picture && !s->avctx->internal->is_copy) {
990  for (i = 0; i < s->picture_count; i++) {
991  free_picture(s, &s->picture[i]);
992  }
993  }
994  av_freep(&s->picture);
995  s->context_initialized = 0;
996  s->last_picture_ptr =
997  s->next_picture_ptr =
999  s->linesize = s->uvlinesize = 0;
1000 
1001  for (i = 0; i < 3; i++)
1003 
1006 }
1007 
1009  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1010 {
1011  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1012  uint8_t index_run[MAX_RUN + 1];
1013  int last, run, level, start, end, i;
1014 
1015  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1016  if (static_store && rl->max_level[0])
1017  return;
1018 
1019  /* compute max_level[], max_run[] and index_run[] */
1020  for (last = 0; last < 2; last++) {
1021  if (last == 0) {
1022  start = 0;
1023  end = rl->last;
1024  } else {
1025  start = rl->last;
1026  end = rl->n;
1027  }
1028 
1029  memset(max_level, 0, MAX_RUN + 1);
1030  memset(max_run, 0, MAX_LEVEL + 1);
1031  memset(index_run, rl->n, MAX_RUN + 1);
1032  for (i = start; i < end; i++) {
1033  run = rl->table_run[i];
1034  level = rl->table_level[i];
1035  if (index_run[run] == rl->n)
1036  index_run[run] = i;
1037  if (level > max_level[run])
1038  max_level[run] = level;
1039  if (run > max_run[level])
1040  max_run[level] = run;
1041  }
1042  if (static_store)
1043  rl->max_level[last] = static_store[last];
1044  else
1045  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1046  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1047  if (static_store)
1048  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1049  else
1050  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1051  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1052  if (static_store)
1053  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1054  else
1055  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1056  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1057  }
1058 }
1059 
1061 {
1062  int i, q;
1063 
1064  for (q = 0; q < 32; q++) {
1065  int qmul = q * 2;
1066  int qadd = (q - 1) | 1;
1067 
1068  if (q == 0) {
1069  qmul = 1;
1070  qadd = 0;
1071  }
1072  for (i = 0; i < rl->vlc.table_size; i++) {
1073  int code = rl->vlc.table[i][0];
1074  int len = rl->vlc.table[i][1];
1075  int level, run;
1076 
1077  if (len == 0) { // illegal code
1078  run = 66;
1079  level = MAX_LEVEL;
1080  } else if (len < 0) { // more bits needed
1081  run = 0;
1082  level = code;
1083  } else {
1084  if (code == rl->n) { // esc
1085  run = 66;
1086  level = 0;
1087  } else {
1088  run = rl->table_run[code] + 1;
1089  level = rl->table_level[code] * qmul + qadd;
1090  if (code >= rl->last) run += 192;
1091  }
1092  }
1093  rl->rl_vlc[q][i].len = len;
1094  rl->rl_vlc[q][i].level = level;
1095  rl->rl_vlc[q][i].run = run;
1096  }
1097  }
1098 }
1099 
1100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1101 {
1102  int i;
1103 
1104  /* release non reference frames */
1105  for (i = 0; i < s->picture_count; i++) {
1106  if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1107  (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1108  (remove_current || &s->picture[i] != s->current_picture_ptr)
1109  /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1110  free_frame_buffer(s, &s->picture[i]);
1111  }
1112  }
1113 }
1114 
1116 {
1117  int i;
1118 
1119  if (shared) {
1120  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1121  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1122  return i;
1123  }
1124  } else {
1125  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1126  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1127  return i; // FIXME
1128  }
1129  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1130  if (s->picture[i].f.data[0] == NULL)
1131  return i;
1132  }
1133  }
1134 
1135  return AVERROR_INVALIDDATA;
1136 }
1137 
1139 {
1140  int intra, i;
1141 
1142  for (intra = 0; intra < 2; intra++) {
1143  if (s->dct_count[intra] > (1 << 16)) {
1144  for (i = 0; i < 64; i++) {
1145  s->dct_error_sum[intra][i] >>= 1;
1146  }
1147  s->dct_count[intra] >>= 1;
1148  }
1149 
1150  for (i = 0; i < 64; i++) {
1151  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1152  s->dct_count[intra] +
1153  s->dct_error_sum[intra][i] / 2) /
1154  (s->dct_error_sum[intra][i] + 1);
1155  }
1156  }
1157 }
1158 
1164 {
1165  int i;
1166  Picture *pic;
1167  s->mb_skipped = 0;
1168 
1169  /* mark & release old frames */
1170  if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1171  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1173  s->last_picture_ptr->f.data[0]) {
1174  if (s->last_picture_ptr->owner2 == s)
1176  }
1177 
1178  /* release forgotten pictures */
1179  /* if (mpeg124/h263) */
1180  if (!s->encoding) {
1181  for (i = 0; i < s->picture_count; i++) {
1182  if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1183  &s->picture[i] != s->last_picture_ptr &&
1184  &s->picture[i] != s->next_picture_ptr &&
1185  s->picture[i].f.reference) {
1186  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1187  av_log(avctx, AV_LOG_ERROR,
1188  "releasing zombie picture\n");
1189  free_frame_buffer(s, &s->picture[i]);
1190  }
1191  }
1192  }
1193  }
1194 
1195  if (!s->encoding) {
1197 
1198  if (s->current_picture_ptr &&
1199  s->current_picture_ptr->f.data[0] == NULL) {
1200  // we already have a unused image
1201  // (maybe it was set before reading the header)
1202  pic = s->current_picture_ptr;
1203  } else {
1204  i = ff_find_unused_picture(s, 0);
1205  pic = &s->picture[i];
1206  }
1207 
1208  pic->f.reference = 0;
1209  if (!s->dropable) {
1210  if (s->codec_id == CODEC_ID_H264)
1211  pic->f.reference = s->picture_structure;
1212  else if (s->pict_type != AV_PICTURE_TYPE_B)
1213  pic->f.reference = 3;
1214  }
1215 
1217 
1218  if (ff_alloc_picture(s, pic, 0) < 0)
1219  return -1;
1220 
1221  s->current_picture_ptr = pic;
1222  // FIXME use only the vars from current_pic
1224  if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1225  s->codec_id == CODEC_ID_MPEG2VIDEO) {
1226  if (s->picture_structure != PICT_FRAME)
1229  }
1233  }
1234 
1236  // if (s->flags && CODEC_FLAG_QSCALE)
1237  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1239 
1241 
1242  if (s->pict_type != AV_PICTURE_TYPE_B) {
1244  if (!s->dropable)
1246  }
1247  /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1248  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1249  s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1250  s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1251  s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1252  s->pict_type, s->dropable); */
1253 
1254  if (s->codec_id != CODEC_ID_H264) {
1255  if ((s->last_picture_ptr == NULL ||
1256  s->last_picture_ptr->f.data[0] == NULL) &&
1257  (s->pict_type != AV_PICTURE_TYPE_I ||
1258  s->picture_structure != PICT_FRAME)) {
1259  if (s->pict_type != AV_PICTURE_TYPE_I)
1260  av_log(avctx, AV_LOG_ERROR,
1261  "warning: first frame is no keyframe\n");
1262  else if (s->picture_structure != PICT_FRAME)
1263  av_log(avctx, AV_LOG_INFO,
1264  "allocate dummy last picture for field based first keyframe\n");
1265 
1266  /* Allocate a dummy frame */
1267  i = ff_find_unused_picture(s, 0);
1268  s->last_picture_ptr = &s->picture[i];
1269 
1270  s->last_picture_ptr->f.reference = 3;
1272 
1273  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1274  return -1;
1276  INT_MAX, 0);
1278  INT_MAX, 1);
1279  }
1280  if ((s->next_picture_ptr == NULL ||
1281  s->next_picture_ptr->f.data[0] == NULL) &&
1282  s->pict_type == AV_PICTURE_TYPE_B) {
1283  /* Allocate a dummy frame */
1284  i = ff_find_unused_picture(s, 0);
1285  s->next_picture_ptr = &s->picture[i];
1286 
1287  s->next_picture_ptr->f.reference = 3;
1289 
1290  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1291  return -1;
1293  INT_MAX, 0);
1295  INT_MAX, 1);
1296  }
1297  }
1298 
1299  if (s->last_picture_ptr)
1301  if (s->next_picture_ptr)
1303 
1304  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1305  (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1306  if (s->next_picture_ptr)
1307  s->next_picture_ptr->owner2 = s;
1308  if (s->last_picture_ptr)
1309  s->last_picture_ptr->owner2 = s;
1310  }
1311 
1312  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1313  s->last_picture_ptr->f.data[0]));
1314 
1315  if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1316  int i;
1317  for (i = 0; i < 4; i++) {
1319  s->current_picture.f.data[i] +=
1320  s->current_picture.f.linesize[i];
1321  }
1322  s->current_picture.f.linesize[i] *= 2;
1323  s->last_picture.f.linesize[i] *= 2;
1324  s->next_picture.f.linesize[i] *= 2;
1325  }
1326  }
1327 
1328  s->err_recognition = avctx->err_recognition;
1329 
1330  /* set dequantizer, we can't do it during init as
1331  * it might change for mpeg4 and we can't do it in the header
1332  * decode as init is not called for mpeg4 there yet */
1333  if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1336  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1339  } else {
1342  }
1343 
1344  if (s->dct_error_sum) {
1345  assert(s->avctx->noise_reduction && s->encoding);
1347  }
1348 
1350  return ff_xvmc_field_start(s, avctx);
1351 
1352  return 0;
1353 }
1354 
1355 /* generic function for encode/decode called after a
1356  * frame has been coded/decoded. */
1358 {
1359  int i;
1360  /* redraw edges for the frame if decoding didn't complete */
1361  // just to make sure that all data is rendered.
1363  ff_xvmc_field_end(s);
1364  } else if ((s->error_count || s->encoding) &&
1365  !s->avctx->hwaccel &&
1367  s->unrestricted_mv &&
1369  !s->intra_only &&
1370  !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1374  s->h_edge_pos, s->v_edge_pos,
1376  EDGE_TOP | EDGE_BOTTOM);
1378  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1379  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1380  EDGE_TOP | EDGE_BOTTOM);
1382  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1383  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1384  EDGE_TOP | EDGE_BOTTOM);
1385  }
1386 
1387  emms_c();
1388 
1389  s->last_pict_type = s->pict_type;
1391  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1393  }
1394 #if 0
1395  /* copy back current_picture variables */
1396  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1397  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1398  s->picture[i] = s->current_picture;
1399  break;
1400  }
1401  }
1402  assert(i < MAX_PICTURE_COUNT);
1403 #endif
1404 
1405  if (s->encoding) {
1406  /* release non-reference frames */
1407  for (i = 0; i < s->picture_count; i++) {
1408  if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1409  /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1410  free_frame_buffer(s, &s->picture[i]);
1411  }
1412  }
1413  }
1414  // clear copies, to avoid confusion
1415 #if 0
1416  memset(&s->last_picture, 0, sizeof(Picture));
1417  memset(&s->next_picture, 0, sizeof(Picture));
1418  memset(&s->current_picture, 0, sizeof(Picture));
1419 #endif
1421 
1422  if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1424  }
1425 }
1426 
1434 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1435  int w, int h, int stride, int color)
1436 {
1437  int x, y, fr, f;
1438 
1439  sx = av_clip(sx, 0, w - 1);
1440  sy = av_clip(sy, 0, h - 1);
1441  ex = av_clip(ex, 0, w - 1);
1442  ey = av_clip(ey, 0, h - 1);
1443 
1444  buf[sy * stride + sx] += color;
1445 
1446  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1447  if (sx > ex) {
1448  FFSWAP(int, sx, ex);
1449  FFSWAP(int, sy, ey);
1450  }
1451  buf += sx + sy * stride;
1452  ex -= sx;
1453  f = ((ey - sy) << 16) / ex;
1454  for (x = 0; x = ex; x++) {
1455  y = (x * f) >> 16;
1456  fr = (x * f) & 0xFFFF;
1457  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1458  buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1459  }
1460  } else {
1461  if (sy > ey) {
1462  FFSWAP(int, sx, ex);
1463  FFSWAP(int, sy, ey);
1464  }
1465  buf += sx + sy * stride;
1466  ey -= sy;
1467  if (ey)
1468  f = ((ex - sx) << 16) / ey;
1469  else
1470  f = 0;
1471  for (y = 0; y = ey; y++) {
1472  x = (y * f) >> 16;
1473  fr = (y * f) & 0xFFFF;
1474  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1475  buf[y * stride + x + 1] += (color * fr ) >> 16;
1476  }
1477  }
1478 }
1479 
/**
 * Draw an arrow from (sx, sy) to (ex, ey): the shaft plus, when the
 * vector is long enough, two short head strokes at the start point.
 *
 * @param buf    base pointer of the plane to draw into
 * @param w      width of the image in pixels
 * @param h      height of the image in pixels
 * @param stride linesize of the plane
 * @param color  intensity to add along the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int vx, vy;

    /* keep endpoints within a loose margin around the image;
     * draw_line() clamps hard to the image itself */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    /* only draw a head when the vector is longer than 3 pixels */
    if (vx * vx + vy * vy > 3 * 3) {
        int hx  = vx + vy; /* shaft direction mixed with its normal */
        int hy  = vy - vx;
        int len = ff_sqrt((hx * hx + hy * hy) << 8);

        // FIXME subpixel accuracy
        hx = ROUNDED_DIV(hx * 3 << 4, len);
        hy = ROUNDED_DIV(hy * 3 << 4, len);

        draw_line(buf, sx, sy, sx + hx, sy + hy, w, h, stride, color);
        draw_line(buf, sx, sy, sx - hy, sy + hx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1514 
1519 {
1520  if (s->avctx->hwaccel || !pict || !pict->mb_type)
1521  return;
1522 
1524  int x,y;
1525 
1526  av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1527  switch (pict->pict_type) {
1528  case AV_PICTURE_TYPE_I:
1529  av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1530  break;
1531  case AV_PICTURE_TYPE_P:
1532  av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1533  break;
1534  case AV_PICTURE_TYPE_B:
1535  av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1536  break;
1537  case AV_PICTURE_TYPE_S:
1538  av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1539  break;
1540  case AV_PICTURE_TYPE_SI:
1541  av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1542  break;
1543  case AV_PICTURE_TYPE_SP:
1544  av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1545  break;
1546  }
1547  for (y = 0; y < s->mb_height; y++) {
1548  for (x = 0; x < s->mb_width; x++) {
1549  if (s->avctx->debug & FF_DEBUG_SKIP) {
1550  int count = s->mbskip_table[x + y * s->mb_stride];
1551  if (count > 9)
1552  count = 9;
1553  av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1554  }
1555  if (s->avctx->debug & FF_DEBUG_QP) {
1556  av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1557  pict->qscale_table[x + y * s->mb_stride]);
1558  }
1559  if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1560  int mb_type = pict->mb_type[x + y * s->mb_stride];
1561  // Type & MV direction
1562  if (IS_PCM(mb_type))
1563  av_log(s->avctx, AV_LOG_DEBUG, "P");
1564  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1565  av_log(s->avctx, AV_LOG_DEBUG, "A");
1566  else if (IS_INTRA4x4(mb_type))
1567  av_log(s->avctx, AV_LOG_DEBUG, "i");
1568  else if (IS_INTRA16x16(mb_type))
1569  av_log(s->avctx, AV_LOG_DEBUG, "I");
1570  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1571  av_log(s->avctx, AV_LOG_DEBUG, "d");
1572  else if (IS_DIRECT(mb_type))
1573  av_log(s->avctx, AV_LOG_DEBUG, "D");
1574  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1575  av_log(s->avctx, AV_LOG_DEBUG, "g");
1576  else if (IS_GMC(mb_type))
1577  av_log(s->avctx, AV_LOG_DEBUG, "G");
1578  else if (IS_SKIP(mb_type))
1579  av_log(s->avctx, AV_LOG_DEBUG, "S");
1580  else if (!USES_LIST(mb_type, 1))
1581  av_log(s->avctx, AV_LOG_DEBUG, ">");
1582  else if (!USES_LIST(mb_type, 0))
1583  av_log(s->avctx, AV_LOG_DEBUG, "<");
1584  else {
1585  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1586  av_log(s->avctx, AV_LOG_DEBUG, "X");
1587  }
1588 
1589  // segmentation
1590  if (IS_8X8(mb_type))
1591  av_log(s->avctx, AV_LOG_DEBUG, "+");
1592  else if (IS_16X8(mb_type))
1593  av_log(s->avctx, AV_LOG_DEBUG, "-");
1594  else if (IS_8X16(mb_type))
1595  av_log(s->avctx, AV_LOG_DEBUG, "|");
1596  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1597  av_log(s->avctx, AV_LOG_DEBUG, " ");
1598  else
1599  av_log(s->avctx, AV_LOG_DEBUG, "?");
1600 
1601 
1602  if (IS_INTERLACED(mb_type))
1603  av_log(s->avctx, AV_LOG_DEBUG, "=");
1604  else
1605  av_log(s->avctx, AV_LOG_DEBUG, " ");
1606  }
1607  // av_log(s->avctx, AV_LOG_DEBUG, " ");
1608  }
1609  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1610  }
1611  }
1612 
1613  if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1614  (s->avctx->debug_mv)) {
1615  const int shift = 1 + s->quarter_sample;
1616  int mb_y;
1617  uint8_t *ptr;
1618  int i;
1619  int h_chroma_shift, v_chroma_shift, block_height;
1620  const int width = s->avctx->width;
1621  const int height = s->avctx->height;
1622  const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1623  const int mv_stride = (s->mb_width << mv_sample_log2) +
1624  (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1625  s->low_delay = 0; // needed to see the vectors without trashing the buffers
1626 
1628  &h_chroma_shift, &v_chroma_shift);
1629  for (i = 0; i < 3; i++) {
1630  memcpy(s->visualization_buffer[i], pict->data[i],
1631  (i == 0) ? pict->linesize[i] * height:
1632  pict->linesize[i] * height >> v_chroma_shift);
1633  pict->data[i] = s->visualization_buffer[i];
1634  }
1635  pict->type = FF_BUFFER_TYPE_COPY;
1636  ptr = pict->data[0];
1637  block_height = 16 >> v_chroma_shift;
1638 
1639  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1640  int mb_x;
1641  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1642  const int mb_index = mb_x + mb_y * s->mb_stride;
1643  if ((s->avctx->debug_mv) && pict->motion_val) {
1644  int type;
1645  for (type = 0; type < 3; type++) {
1646  int direction = 0;
1647  switch (type) {
1648  case 0:
1649  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1650  (pict->pict_type!= AV_PICTURE_TYPE_P))
1651  continue;
1652  direction = 0;
1653  break;
1654  case 1:
1655  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1656  (pict->pict_type!= AV_PICTURE_TYPE_B))
1657  continue;
1658  direction = 0;
1659  break;
1660  case 2:
1661  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1662  (pict->pict_type!= AV_PICTURE_TYPE_B))
1663  continue;
1664  direction = 1;
1665  break;
1666  }
1667  if (!USES_LIST(pict->mb_type[mb_index], direction))
1668  continue;
1669 
1670  if (IS_8X8(pict->mb_type[mb_index])) {
1671  int i;
1672  for (i = 0; i < 4; i++) {
1673  int sx = mb_x * 16 + 4 + 8 * (i & 1);
1674  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1675  int xy = (mb_x * 2 + (i & 1) +
1676  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1677  int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1678  int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1679  draw_arrow(ptr, sx, sy, mx, my, width,
1680  height, s->linesize, 100);
1681  }
1682  } else if (IS_16X8(pict->mb_type[mb_index])) {
1683  int i;
1684  for (i = 0; i < 2; i++) {
1685  int sx = mb_x * 16 + 8;
1686  int sy = mb_y * 16 + 4 + 8 * i;
1687  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1688  int mx = (pict->motion_val[direction][xy][0] >> shift);
1689  int my = (pict->motion_val[direction][xy][1] >> shift);
1690 
1691  if (IS_INTERLACED(pict->mb_type[mb_index]))
1692  my *= 2;
1693 
1694  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1695  height, s->linesize, 100);
1696  }
1697  } else if (IS_8X16(pict->mb_type[mb_index])) {
1698  int i;
1699  for (i = 0; i < 2; i++) {
1700  int sx = mb_x * 16 + 4 + 8 * i;
1701  int sy = mb_y * 16 + 8;
1702  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1703  int mx = pict->motion_val[direction][xy][0] >> shift;
1704  int my = pict->motion_val[direction][xy][1] >> shift;
1705 
1706  if (IS_INTERLACED(pict->mb_type[mb_index]))
1707  my *= 2;
1708 
1709  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1710  height, s->linesize, 100);
1711  }
1712  } else {
1713  int sx = mb_x * 16 + 8;
1714  int sy = mb_y * 16 + 8;
1715  int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1716  int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1717  int my = pict->motion_val[direction][xy][1] >> shift + sy;
1718  draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1719  }
1720  }
1721  }
1722  if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1723  uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1724  0x0101010101010101ULL;
1725  int y;
1726  for (y = 0; y < block_height; y++) {
1727  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1728  (block_height * mb_y + y) *
1729  pict->linesize[1]) = c;
1730  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1731  (block_height * mb_y + y) *
1732  pict->linesize[2]) = c;
1733  }
1734  }
1735  if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1736  pict->motion_val) {
1737  int mb_type = pict->mb_type[mb_index];
1738  uint64_t u,v;
1739  int y;
1740 #define COLOR(theta, r) \
1741  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1742  v = (int)(128 + r * sin(theta * 3.141592 / 180));
1743 
1744 
1745  u = v = 128;
1746  if (IS_PCM(mb_type)) {
1747  COLOR(120, 48)
1748  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1749  IS_INTRA16x16(mb_type)) {
1750  COLOR(30, 48)
1751  } else if (IS_INTRA4x4(mb_type)) {
1752  COLOR(90, 48)
1753  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1754  // COLOR(120, 48)
1755  } else if (IS_DIRECT(mb_type)) {
1756  COLOR(150, 48)
1757  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1758  COLOR(170, 48)
1759  } else if (IS_GMC(mb_type)) {
1760  COLOR(190, 48)
1761  } else if (IS_SKIP(mb_type)) {
1762  // COLOR(180, 48)
1763  } else if (!USES_LIST(mb_type, 1)) {
1764  COLOR(240, 48)
1765  } else if (!USES_LIST(mb_type, 0)) {
1766  COLOR(0, 48)
1767  } else {
1768  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1769  COLOR(300,48)
1770  }
1771 
1772  u *= 0x0101010101010101ULL;
1773  v *= 0x0101010101010101ULL;
1774  for (y = 0; y < block_height; y++) {
1775  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1776  (block_height * mb_y + y) * pict->linesize[1]) = u;
1777  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1778  (block_height * mb_y + y) * pict->linesize[2]) = v;
1779  }
1780 
1781  // segmentation
1782  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1783  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1784  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1785  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1786  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1787  }
1788  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1789  for (y = 0; y < 16; y++)
1790  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1791  pict->linesize[0]] ^= 0x80;
1792  }
1793  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1794  int dm = 1 << (mv_sample_log2 - 2);
1795  for (i = 0; i < 4; i++) {
1796  int sx = mb_x * 16 + 8 * (i & 1);
1797  int sy = mb_y * 16 + 8 * (i >> 1);
1798  int xy = (mb_x * 2 + (i & 1) +
1799  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1800  // FIXME bidir
1801  int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1802  if (mv[0] != mv[dm] ||
1803  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1804  for (y = 0; y < 8; y++)
1805  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1806  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1807  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1808  pict->linesize[0]) ^= 0x8080808080808080ULL;
1809  }
1810  }
1811 
1812  if (IS_INTERLACED(mb_type) &&
1813  s->codec_id == CODEC_ID_H264) {
1814  // hmm
1815  }
1816  }
1817  s->mbskip_table[mb_index] = 0;
1818  }
1819  }
1820  }
1821 }
1822 
/**
 * Half-pel motion compensation for a single block at reduced ("lowres")
 * resolution: computes the sub-pel phase, fetches (and if necessary
 * edge-emulates) the source block, and calls the chroma MC routine that
 * performs the fractional interpolation.
 *
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    /* mask keeping lowres+1 fractional bits of the motion vector */
    const int s_mask = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        /* reduce qpel vectors to hpel precision */
        motion_x /= 2;
        motion_y /= 2;
    }

    /* sub-pel phase within the downscaled grid */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>', so this is motion_x >> (lowres + 1);
     * the extra bit is the half-pel fraction */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* if the block reaches past the decoded picture edges, copy it through
     * the edge-emulation buffer with replicated borders */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                (h + 1) << field_based, src_x,
                                src_y << field_based,
                                h_edge_pos,
                                v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel phase to the 1/8-step grid used by pix_op */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize; /* select the bottom field */
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
1868 
1869 /* apply one mpeg motion vector to the three components */
1871  uint8_t *dest_y,
1872  uint8_t *dest_cb,
1873  uint8_t *dest_cr,
1874  int field_based,
1875  int bottom_field,
1876  int field_select,
1877  uint8_t **ref_picture,
1878  h264_chroma_mc_func *pix_op,
1879  int motion_x, int motion_y,
1880  int h, int mb_y)
1881 {
1882  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1883  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
1884  uvsx, uvsy;
1885  const int lowres = s->avctx->lowres;
1886  const int op_index = FFMIN(lowres, 2);
1887  const int block_s = 8>>lowres;
1888  const int s_mask = (2 << lowres) - 1;
1889  const int h_edge_pos = s->h_edge_pos >> lowres;
1890  const int v_edge_pos = s->v_edge_pos >> lowres;
1891  linesize = s->current_picture.f.linesize[0] << field_based;
1892  uvlinesize = s->current_picture.f.linesize[1] << field_based;
1893 
1894  // FIXME obviously not perfect but qpel will not work in lowres anyway
1895  if (s->quarter_sample) {
1896  motion_x /= 2;
1897  motion_y /= 2;
1898  }
1899 
1900  if (field_based) {
1901  motion_y += (bottom_field - field_select) * (1 << lowres - 1);
1902  }
1903 
1904  sx = motion_x & s_mask;
1905  sy = motion_y & s_mask;
1906  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1907  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1908 
1909  if (s->out_format == FMT_H263) {
1910  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1911  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1912  uvsrc_x = src_x >> 1;
1913  uvsrc_y = src_y >> 1;
1914  } else if (s->out_format == FMT_H261) {
1915  // even chroma mv's are full pel in H261
1916  mx = motion_x / 4;
1917  my = motion_y / 4;
1918  uvsx = (2 * mx) & s_mask;
1919  uvsy = (2 * my) & s_mask;
1920  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1921  uvsrc_y = mb_y * block_s + (my >> lowres);
1922  } else {
1923  mx = motion_x / 2;
1924  my = motion_y / 2;
1925  uvsx = mx & s_mask;
1926  uvsy = my & s_mask;
1927  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1928  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1929  }
1930 
1931  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1932  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1933  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1934 
1935  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
1936  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1937  s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
1938  s->linesize, 17, 17 + field_based,
1939  src_x, src_y << field_based, h_edge_pos,
1940  v_edge_pos);
1941  ptr_y = s->edge_emu_buffer;
1942  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1943  uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
1944  s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
1945  9 + field_based,
1946  uvsrc_x, uvsrc_y << field_based,
1947  h_edge_pos >> 1, v_edge_pos >> 1);
1948  s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
1949  9 + field_based,
1950  uvsrc_x, uvsrc_y << field_based,
1951  h_edge_pos >> 1, v_edge_pos >> 1);
1952  ptr_cb = uvbuf;
1953  ptr_cr = uvbuf + 16;
1954  }
1955  }
1956 
1957  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1958  if (bottom_field) {
1959  dest_y += s->linesize;
1960  dest_cb += s->uvlinesize;
1961  dest_cr += s->uvlinesize;
1962  }
1963 
1964  if (field_select) {
1965  ptr_y += s->linesize;
1966  ptr_cb += s->uvlinesize;
1967  ptr_cr += s->uvlinesize;
1968  }
1969 
1970  sx = (sx << 2) >> lowres;
1971  sy = (sy << 2) >> lowres;
1972  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1973 
1974  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1975  uvsx = (uvsx << 2) >> lowres;
1976  uvsy = (uvsy << 2) >> lowres;
1977  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
1978  uvsx, uvsy);
1979  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
1980  uvsx, uvsy);
1981  }
1982  // FIXME h261 lowres loop filter
1983 }
1984 
1986  uint8_t *dest_cb, uint8_t *dest_cr,
1987  uint8_t **ref_picture,
1988  h264_chroma_mc_func * pix_op,
1989  int mx, int my)
1990 {
1991  const int lowres = s->avctx->lowres;
1992  const int op_index = FFMIN(lowres, 2);
1993  const int block_s = 8 >> lowres;
1994  const int s_mask = (2 << lowres) - 1;
1995  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1996  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1997  int emu = 0, src_x, src_y, offset, sx, sy;
1998  uint8_t *ptr;
1999 
2000  if (s->quarter_sample) {
2001  mx /= 2;
2002  my /= 2;
2003  }
2004 
2005  /* In case of 8X8, we construct a single chroma motion vector
2006  with a special rounding */
2007  mx = ff_h263_round_chroma(mx);
2008  my = ff_h263_round_chroma(my);
2009 
2010  sx = mx & s_mask;
2011  sy = my & s_mask;
2012  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2013  src_y = s->mb_y * block_s + (my >> lowres + 1);
2014 
2015  offset = src_y * s->uvlinesize + src_x;
2016  ptr = ref_picture[1] + offset;
2017  if (s->flags & CODEC_FLAG_EMU_EDGE) {
2018  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2019  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2021  9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2022  ptr = s->edge_emu_buffer;
2023  emu = 1;
2024  }
2025  }
2026  sx = (sx << 2) >> lowres;
2027  sy = (sy << 2) >> lowres;
2028  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2029 
2030  ptr = ref_picture[2] + offset;
2031  if (emu) {
2032  s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2033  src_x, src_y, h_edge_pos, v_edge_pos);
2034  ptr = s->edge_emu_buffer;
2035  }
2036  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2037 }
2038 
/**
 * Perform motion compensation for a macroblock at reduced ("lowres")
 * resolution, dispatching on s->mv_type to the appropriate per-block
 * lowres MC helpers.
 *
 * @param dest_y      luma destination pointer
 * @param dest_cb     chroma cb destination pointer
 * @param dest_cr     chroma cr destination pointer
 * @param dir         direction (0 = forward, 1 = backward)
 * @param ref_picture array[3] of pointers to the reference planes
 * @param pix_op      halfpel motion compensation function (average or put)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres; /* 8x8 block size after downscaling */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their rounded sum (4MV mode) */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            /* accumulate the four vectors for the chroma MV */
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: one vector per field, half-height blocks */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: same-parity reference of the current frame
             * replaces ref_picture in the second-field P case */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                /* reference lies in the current frame's other field */
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            /* advance destinations to the lower 16x8 half */
            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: predictions from both parities are averaged */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* after the first put, average the second prediction in */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
2180 
2185 {
2186  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2187  int my, off, i, mvs;
2188 
2189  if (s->picture_structure != PICT_FRAME) goto unhandled;
2190 
2191  switch (s->mv_type) {
2192  case MV_TYPE_16X16:
2193  mvs = 1;
2194  break;
2195  case MV_TYPE_16X8:
2196  mvs = 2;
2197  break;
2198  case MV_TYPE_8X8:
2199  mvs = 4;
2200  break;
2201  default:
2202  goto unhandled;
2203  }
2204 
2205  for (i = 0; i < mvs; i++) {
2206  my = s->mv[dir][i][1]<<qpel_shift;
2207  my_max = FFMAX(my_max, my);
2208  my_min = FFMIN(my_min, my);
2209  }
2210 
2211  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2212 
2213  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2214 unhandled:
2215  return s->mb_height-1;
2216 }
2217 
2218 /* put block[] to dest[] */
/* put block[] to dest[]: intra path — dequantize the coefficients with the
 * intra unquantizer, then IDCT and store (overwrite) into dest */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
2225 
2226 /* add block[] to dest[] */
2227 static inline void add_dct(MpegEncContext *s,
2228  DCTELEM *block, int i, uint8_t *dest, int line_size)
2229 {
2230  if (s->block_last_index[i] >= 0) {
2231  s->dsp.idct_add (dest, line_size, block);
2232  }
2233 }
2234 
2235 static inline void add_dequant_dct(MpegEncContext *s,
2236  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2237 {
2238  if (s->block_last_index[i] >= 0) {
2239  s->dct_unquantize_inter(s, block, i, qscale);
2240 
2241  s->dsp.idct_add (dest, line_size, block);
2242  }
2243 }
2244 
2249 {
2250  int wrap = s->b8_stride;
2251  int xy = s->block_index[0];
2252 
2253  s->dc_val[0][xy ] =
2254  s->dc_val[0][xy + 1 ] =
2255  s->dc_val[0][xy + wrap] =
2256  s->dc_val[0][xy + 1 + wrap] = 1024;
2257  /* ac pred */
2258  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2259  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2260  if (s->msmpeg4_version>=3) {
2261  s->coded_block[xy ] =
2262  s->coded_block[xy + 1 ] =
2263  s->coded_block[xy + wrap] =
2264  s->coded_block[xy + 1 + wrap] = 0;
2265  }
2266  /* chroma */
2267  wrap = s->mb_stride;
2268  xy = s->mb_x + s->mb_y * wrap;
2269  s->dc_val[1][xy] =
2270  s->dc_val[2][xy] = 1024;
2271  /* ac pred */
2272  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2273  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2274 
2275  s->mbintra_table[xy]= 0;
2276 }
2277 
2278 /* generic function called after a macroblock has been parsed by the
2279  decoder or after it has been encoded by the encoder.
2280 
2281  Important variables used:
2282  s->mb_intra : true if intra macroblock
2283  s->mv_dir : motion vector direction
2284  s->mv_type : motion vector type
2285  s->mv : motion vector
2286  s->interlaced_dct : true if interlaced dct used (mpeg2)
2287  */
2288 static av_always_inline
2290  int lowres_flag, int is_mpeg12)
2291 {
2292  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2294  ff_xvmc_decode_mb(s);//xvmc uses pblocks
2295  return;
2296  }
2297 
2298  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2299  /* save DCT coefficients */
2300  int i,j;
2301  DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2302  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2303  for(i=0; i<6; i++){
2304  for(j=0; j<64; j++){
2305  *dct++ = block[i][s->dsp.idct_permutation[j]];
2306  av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2307  }
2308  av_log(s->avctx, AV_LOG_DEBUG, "\n");
2309  }
2310  }
2311 
2312  s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2313 
2314  /* update DC predictors for P macroblocks */
2315  if (!s->mb_intra) {
2316  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2317  if(s->mbintra_table[mb_xy])
2319  } else {
2320  s->last_dc[0] =
2321  s->last_dc[1] =
2322  s->last_dc[2] = 128 << s->intra_dc_precision;
2323  }
2324  }
2325  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2326  s->mbintra_table[mb_xy]=1;
2327 
2328  if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2329  uint8_t *dest_y, *dest_cb, *dest_cr;
2330  int dct_linesize, dct_offset;
2331  op_pixels_func (*op_pix)[4];
2332  qpel_mc_func (*op_qpix)[16];
2333  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2334  const int uvlinesize = s->current_picture.f.linesize[1];
2335  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2336  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2337 
2338  /* avoid copy if macroblock skipped in last frame too */
2339  /* skip only during decoding as we might trash the buffers during encoding a bit */
2340  if(!s->encoding){
2341  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2342 
2343  if (s->mb_skipped) {
2344  s->mb_skipped= 0;
2345  assert(s->pict_type!=AV_PICTURE_TYPE_I);
2346  *mbskip_ptr = 1;
2347  } else if(!s->current_picture.f.reference) {
2348  *mbskip_ptr = 1;
2349  } else{
2350  *mbskip_ptr = 0; /* not skipped */
2351  }
2352  }
2353 
2354  dct_linesize = linesize << s->interlaced_dct;
2355  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2356 
2357  if(readable){
2358  dest_y= s->dest[0];
2359  dest_cb= s->dest[1];
2360  dest_cr= s->dest[2];
2361  }else{
2362  dest_y = s->b_scratchpad;
2363  dest_cb= s->b_scratchpad+16*linesize;
2364  dest_cr= s->b_scratchpad+32*linesize;
2365  }
2366 
2367  if (!s->mb_intra) {
2368  /* motion handling */
2369  /* decoding or more than one mb_type (MC was already done otherwise) */
2370  if(!s->encoding){
2371 
2373  if (s->mv_dir & MV_DIR_FORWARD) {
2375  }
2376  if (s->mv_dir & MV_DIR_BACKWARD) {
2378  }
2379  }
2380 
2381  if(lowres_flag){
2383 
2384  if (s->mv_dir & MV_DIR_FORWARD) {
2385  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2386  op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2387  }
2388  if (s->mv_dir & MV_DIR_BACKWARD) {
2389  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2390  }
2391  }else{
2392  op_qpix= s->me.qpel_put;
2393  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2394  op_pix = s->dsp.put_pixels_tab;
2395  }else{
2396  op_pix = s->dsp.put_no_rnd_pixels_tab;
2397  }
2398  if (s->mv_dir & MV_DIR_FORWARD) {
2399  MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2400  op_pix = s->dsp.avg_pixels_tab;
2401  op_qpix= s->me.qpel_avg;
2402  }
2403  if (s->mv_dir & MV_DIR_BACKWARD) {
2404  MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2405  }
2406  }
2407  }
2408 
2409  /* skip dequant / idct if we are really late ;) */
2410  if(s->avctx->skip_idct){
2413  || s->avctx->skip_idct >= AVDISCARD_ALL)
2414  goto skip_idct;
2415  }
2416 
2417  /* add dct residue */
2419  || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2420  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2421  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2422  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2423  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2424 
2425  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2426  if (s->chroma_y_shift){
2427  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2428  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2429  }else{
2430  dct_linesize >>= 1;
2431  dct_offset >>=1;
2432  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2433  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2434  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2435  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2436  }
2437  }
2438  } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2439  add_dct(s, block[0], 0, dest_y , dct_linesize);
2440  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2441  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2442  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2443 
2444  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2445  if(s->chroma_y_shift){//Chroma420
2446  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2447  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2448  }else{
2449  //chroma422
2450  dct_linesize = uvlinesize << s->interlaced_dct;
2451  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2452 
2453  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2454  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2455  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2456  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2457  if(!s->chroma_x_shift){//Chroma444
2458  add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2459  add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2460  add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2461  add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2462  }
2463  }
2464  }//fi gray
2465  }
2467  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2468  }
2469  } else {
2470  /* dct only in intra block */
2472  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2473  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2474  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2475  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2476 
2477  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2478  if(s->chroma_y_shift){
2479  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2480  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2481  }else{
2482  dct_offset >>=1;
2483  dct_linesize >>=1;
2484  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2485  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2486  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2487  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2488  }
2489  }
2490  }else{
2491  s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2492  s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2493  s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2494  s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2495 
2496  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2497  if(s->chroma_y_shift){
2498  s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2499  s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2500  }else{
2501 
2502  dct_linesize = uvlinesize << s->interlaced_dct;
2503  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2504 
2505  s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2506  s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2507  s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2508  s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2509  if(!s->chroma_x_shift){//Chroma444
2510  s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2511  s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2512  s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2513  s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2514  }
2515  }
2516  }//gray
2517  }
2518  }
2519 skip_idct:
2520  if(!readable){
2521  s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2522  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2523  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2524  }
2525  }
2526 }
2527 
/* Dispatcher that selects a specialized variant of MPV_decode_mb_internal():
 * the (lowres, is_mpeg12) template parameters are compile-time constants in
 * each call so the compiler can dead-strip the unused paths.
 * NOTE(review): the function signature line (doxygen line 2528) is missing
 * from this extraction; presumably this is MPV_decode_mb(s, block). */
2529 #if !CONFIG_SMALL
/* With CONFIG_SMALL the MPEG-1/2 fast path is omitted to save code size. */
2530  if(s->out_format == FMT_MPEG1) {
2531  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2532  else MPV_decode_mb_internal(s, block, 0, 1);
2533  } else
2534 #endif
2535  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2536  else MPV_decode_mb_internal(s, block, 0, 0);
2537 }
2538 
/**
 * Draw the edges of a completed horizontal band and notify the client
 * via avctx->draw_horiz_band(), if set.
 * @param y  top row of the band (in frame coordinates; doubled for field pics)
 * @param h  height of the band in pixels
 */
2542 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2543  const int field_pic= s->picture_structure != PICT_FRAME;
/* For field pictures each coded row covers two frame rows. */
2544  if(field_pic){
2545  h <<= 1;
2546  y <<= 1;
2547  }
2548 
/* Replicate the picture borders (needed by unrestricted MV prediction).
 * NOTE(review): this doxygen extraction drops lines 2550/2552 of the
 * condition and the declarations of hshift/vshift (lines 2556-2557),
 * presumably derived from the chroma subsampling — confirm in the repo. */
2549  if (!s->avctx->hwaccel
2551  && s->unrestricted_mv
2553  && !s->intra_only
2554  && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2555  int sides = 0, edge_h;
2558  if (y==0) sides |= EDGE_TOP;
2559  if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2560 
/* Clip the band height so we never pad past the picture bottom. */
2561  edge_h= FFMIN(h, s->v_edge_pos - y);
2562 
2563  s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2564  s->linesize, s->h_edge_pos, edge_h,
2565  EDGE_WIDTH, EDGE_WIDTH, sides);
2566  s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2567  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2568  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2569  s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2570  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2571  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2572  }
2573 
2574  h= FFMIN(h, s->avctx->height - y);
2575 
/* For the first field of a field picture, only report if the client
 * explicitly allows per-field callbacks. */
2576  if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2577 
2578  if (s->avctx->draw_horiz_band) {
2579  AVFrame *src;
2580  int offset[AV_NUM_DATA_POINTERS];
2581  int i;
2582 
/* Pick the frame to hand out.
 * NOTE(review): the condition on line 2583 is missing from this extraction;
 * it presumably selects current_picture_ptr for B frames / low_delay. */
2584  src= (AVFrame*)s->current_picture_ptr;
2585  else if(s->last_picture_ptr)
2586  src= (AVFrame*)s->last_picture_ptr;
2587  else
2588  return;
2589 
/* NOTE(review): the condition on line 2590 is missing from this extraction;
 * the first arm passes zero offsets, the second offsets each plane to the
 * top of the band. */
2591  for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2592  offset[i] = 0;
2593  }else{
2594  offset[0]= y * s->linesize;
2595  offset[1]=
2596  offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2597  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2598  offset[i] = 0;
2599  }
2600 
/* Leave MMX state clean before calling back into user code. */
2601  emms_c();
2602 
2603  s->avctx->draw_horiz_band(s->avctx, src, offset,
2604  y, s->picture_structure, h);
2605  }
2606 }
2607 
/**
 * Initialize s->block_index[] (8x8-block indices of the current macroblock's
 * luma/chroma blocks) and s->dest[] (plane pointers for the current MB),
 * based on s->mb_x/s->mb_y and the picture structure.
 */
2608 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2609  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2610  const int uvlinesize = s->current_picture.f.linesize[1];
/* Macroblock size in log2 pixels: 16 at full res, halved per lowres step. */
2611  const int mb_size= 4 - s->avctx->lowres;
2612 
/* Luma: four 8x8 blocks on two b8_stride rows; the "-2"/"-1" bias points one
 * MB to the left, compensated by the per-MB advance of s->dest below. */
2613  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2614  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2615  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2616  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Chroma blocks live after the luma area (b8_stride*mb_height*2 entries). */
2617  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2618  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2619  //block_index is not used by mpeg2, so it is not affected by chroma_format
2620 
2621  s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2622  s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2623  s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2624 
/* NOTE(review): the guarding condition on line 2625 is missing from this
 * extraction — presumably it skips the vertical offset for H.264-style
 * callers; confirm against the repository. */
2626  {
2627  if(s->picture_structure==PICT_FRAME){
2628  s->dest[0] += s->mb_y * linesize << mb_size;
2629  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2630  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2631  }else{
/* Field picture: rows of one field are interleaved, so halve mb_y;
 * the linesize used here is already the doubled field linesize. */
2632  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2633  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2634  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2635  assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2636  }
2637  }
2638 }
2639 
/* Flush decoder state: release all internally/user-owned picture buffers and
 * reset parser and position state so decoding can restart cleanly (e.g. after
 * a seek).
 * NOTE(review): the function signature line (doxygen line 2640) is missing
 * from this extraction; presumably ff_mpeg_flush(AVCodecContext *avctx).
 * Lines 2653/2658/2660 are also missing (presumably resetting the
 * current/last/next picture pointers and further parse_context fields). */
2641  int i;
2642  MpegEncContext *s = avctx->priv_data;
2643 
/* Nothing to do if the context was never fully initialized. */
2644  if(s==NULL || s->picture==NULL)
2645  return;
2646 
2647  for(i=0; i<s->picture_count; i++){
2648  if (s->picture[i].f.data[0] &&
2649  (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2650  s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2651  free_frame_buffer(s, &s->picture[i]);
2652  }
2654 
2655  s->mb_x= s->mb_y= 0;
2656 
/* Reset the bitstream parser so it does not splice pre-seek data. */
2657  s->parse_context.state= -1;
2659  s->parse_context.overread= 0;
2661  s->parse_context.index= 0;
2662  s->parse_context.last_index= 0;
2663  s->bitstream_buffer_size=0;
2664  s->pp_time=0;
2665 }
2666 
/* MPEG-1 intra-block dequantization (C reference).
 * NOTE(review): the first line of the signature is missing from this
 * extraction; per the forward declarations this is
 * dct_unquantize_mpeg1_intra_c(MpegEncContext *s, ...).
 * @param block coefficient block, modified in place
 * @param n     block index (n < 4 => luma, else chroma)
 * @param qscale quantizer scale for this block */
2668  DCTELEM *block, int n, int qscale)
2669 {
2670  int i, level, nCoeffs;
2671  const uint16_t *quant_matrix;
2672 
2673  nCoeffs= s->block_last_index[n];
2674 
/* DC coefficient uses a separate scale, not the quant matrix. */
2675  if (n < 4)
2676  block[0] = block[0] * s->y_dc_scale;
2677  else
2678  block[0] = block[0] * s->c_dc_scale;
2679  /* XXX: only mpeg1 */
2680  quant_matrix = s->intra_matrix;
2681  for(i=1;i<=nCoeffs;i++) {
/* Walk coefficients in scan order; j is the raster position. */
2682  int j= s->intra_scantable.permutated[i];
2683  level = block[j];
2684  if (level) {
2685  if (level < 0) {
2686  level = -level;
2687  level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* (level-1)|1 forces the result odd: MPEG-1 oddification toward zero. */
2688  level = (level - 1) | 1;
2689  level = -level;
2690  } else {
2691  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2692  level = (level - 1) | 1;
2693  }
2694  block[j] = level;
2695  }
2696  }
2697 }
2698 
/* MPEG-1 inter-block dequantization (C reference).
 * NOTE(review): the first line of the signature is missing from this
 * extraction; presumably dct_unquantize_mpeg1_inter_c(MpegEncContext *s, ...).
 * Inter blocks have no special DC handling, so the loop starts at i=0. */
2700  DCTELEM *block, int n, int qscale)
2701 {
2702  int i, level, nCoeffs;
2703  const uint16_t *quant_matrix;
2704 
2705  nCoeffs= s->block_last_index[n];
2706 
2707  quant_matrix = s->inter_matrix;
2708  for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used for inter too: the scan permutation is shared. */
2709  int j= s->intra_scantable.permutated[i];
2710  level = block[j];
2711  if (level) {
2712  if (level < 0) {
2713  level = -level;
/* Inter reconstruction: ((2*level + 1) * qscale * matrix) / 16. */
2714  level = (((level << 1) + 1) * qscale *
2715  ((int) (quant_matrix[j]))) >> 4;
/* Force odd (MPEG-1 oddification toward zero). */
2716  level = (level - 1) | 1;
2717  level = -level;
2718  } else {
2719  level = (((level << 1) + 1) * qscale *
2720  ((int) (quant_matrix[j]))) >> 4;
2721  level = (level - 1) | 1;
2722  }
2723  block[j] = level;
2724  }
2725  }
2726 }
2727 
/* MPEG-2 intra-block dequantization (C reference, no mismatch control).
 * NOTE(review): the first line of the signature is missing from this
 * extraction; presumably dct_unquantize_mpeg2_intra_c(MpegEncContext *s, ...).
 * Unlike MPEG-1 there is no oddification of the result. */
2729  DCTELEM *block, int n, int qscale)
2730 {
2731  int i, level, nCoeffs;
2732  const uint16_t *quant_matrix;
2733 
/* With alternate scan the last index is unreliable; process all 63 ACs. */
2734  if(s->alternate_scan) nCoeffs= 63;
2735  else nCoeffs= s->block_last_index[n];
2736 
/* DC uses its own scale, not the quant matrix. */
2737  if (n < 4)
2738  block[0] = block[0] * s->y_dc_scale;
2739  else
2740  block[0] = block[0] * s->c_dc_scale;
2741  quant_matrix = s->intra_matrix;
2742  for(i=1;i<=nCoeffs;i++) {
2743  int j= s->intra_scantable.permutated[i];
2744  level = block[j];
2745  if (level) {
2746  if (level < 0) {
2747  level = -level;
2748  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2749  level = -level;
2750  } else {
2751  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2752  }
2753  block[j] = level;
2754  }
2755  }
2756 }
2757 
/* Bit-exact MPEG-2 intra-block dequantization (C reference).
 * NOTE(review): the first line of the signature is missing from this
 * extraction; presumably
 * dct_unquantize_mpeg2_intra_bitexact_c(MpegEncContext *s, ...).
 * Same as the plain MPEG-2 intra variant, plus spec-mandated mismatch
 * control: the parity of the coefficient sum is folded into block[63]. */
2759  DCTELEM *block, int n, int qscale)
2760 {
2761  int i, level, nCoeffs;
2762  const uint16_t *quant_matrix;
/* Start at -1 so the sum parity also accounts for the DC term's handling. */
2763  int sum=-1;
2764 
2765  if(s->alternate_scan) nCoeffs= 63;
2766  else nCoeffs= s->block_last_index[n];
2767 
2768  if (n < 4)
2769  block[0] = block[0] * s->y_dc_scale;
2770  else
2771  block[0] = block[0] * s->c_dc_scale;
2772  quant_matrix = s->intra_matrix;
2773  for(i=1;i<=nCoeffs;i++) {
2774  int j= s->intra_scantable.permutated[i];
2775  level = block[j];
2776  if (level) {
2777  if (level < 0) {
2778  level = -level;
2779  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2780  level = -level;
2781  } else {
2782  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2783  }
2784  block[j] = level;
2785  sum+=level;
2786  }
2787  }
/* Mismatch control: toggle the LSB of the last coefficient if sum is even. */
2788  block[63]^=sum&1;
2789 }
2790 
/* MPEG-2 inter-block dequantization (C reference) with mismatch control.
 * NOTE(review): the first line of the signature is missing from this
 * extraction; presumably dct_unquantize_mpeg2_inter_c(MpegEncContext *s, ...).
 * Inter blocks: no DC special case, loop starts at i=0. */
2792  DCTELEM *block, int n, int qscale)
2793 {
2794  int i, level, nCoeffs;
2795  const uint16_t *quant_matrix;
2796  int sum=-1;
2797 
2798  if(s->alternate_scan) nCoeffs= 63;
2799  else nCoeffs= s->block_last_index[n];
2800 
2801  quant_matrix = s->inter_matrix;
2802  for(i=0; i<=nCoeffs; i++) {
/* Scan permutation is shared between intra and inter tables. */
2803  int j= s->intra_scantable.permutated[i];
2804  level = block[j];
2805  if (level) {
2806  if (level < 0) {
2807  level = -level;
/* Inter reconstruction: ((2*level + 1) * qscale * matrix) / 16. */
2808  level = (((level << 1) + 1) * qscale *
2809  ((int) (quant_matrix[j]))) >> 4;
2810  level = -level;
2811  } else {
2812  level = (((level << 1) + 1) * qscale *
2813  ((int) (quant_matrix[j]))) >> 4;
2814  }
2815  block[j] = level;
2816  sum+=level;
2817  }
2818  }
/* Mismatch control: force the overall coefficient-sum parity to odd. */
2819  block[63]^=sum&1;
2820 }
2821 
/* H.263 intra-block dequantization (C reference): level' = 2q*level ± (q-1|1).
 * NOTE(review): the first line of the signature is missing from this
 * extraction; presumably dct_unquantize_h263_intra_c(MpegEncContext *s, ...). */
2823  DCTELEM *block, int n, int qscale)
2824 {
2825  int i, level, qmul, qadd;
2826  int nCoeffs;
2827 
2828  assert(s->block_last_index[n]>=0);
2829 
2830  qmul = qscale << 1;
2831 
/* With AIC the DC is predicted elsewhere, so skip the DC scale and qadd. */
2832  if (!s->h263_aic) {
2833  if (n < 4)
2834  block[0] = block[0] * s->y_dc_scale;
2835  else
2836  block[0] = block[0] * s->c_dc_scale;
/* (qscale-1)|1 makes qadd odd, per the H.263 reconstruction formula. */
2837  qadd = (qscale - 1) | 1;
2838  }else{
2839  qadd = 0;
2840  }
/* With AC prediction all 63 ACs may be non-zero regardless of last_index. */
2841  if(s->ac_pred)
2842  nCoeffs=63;
2843  else
2844  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2845 
/* Coefficients are processed in raster order (i indexes block directly). */
2846  for(i=1; i<=nCoeffs; i++) {
2847  level = block[i];
2848  if (level) {
2849  if (level < 0) {
2850  level = level * qmul - qadd;
2851  } else {
2852  level = level * qmul + qadd;
2853  }
2854  block[i] = level;
2855  }
2856  }
2857 }
2858 
/* H.263 inter-block dequantization (C reference): level' = 2q*level ± (q-1|1),
 * applied to every coefficient including DC (loop starts at i=0).
 * NOTE(review): the first line of the signature is missing from this
 * extraction; presumably dct_unquantize_h263_inter_c(MpegEncContext *s, ...). */
2860  DCTELEM *block, int n, int qscale)
2861 {
2862  int i, level, qmul, qadd;
2863  int nCoeffs;
2864 
2865  assert(s->block_last_index[n]>=0);
2866 
2867  qadd = (qscale - 1) | 1;
2868  qmul = qscale << 1;
2869 
/* raster_end maps the scan-order last index to the raster-order bound. */
2870  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2871 
2872  for(i=0; i<=nCoeffs; i++) {
2873  level = block[i];
2874  if (level) {
2875  if (level < 0) {
2876  level = level * qmul - qadd;
2877  } else {
2878  level = level * qmul + qadd;
2879  }
2880  block[i] = level;
2881  }
2882  }
2883 }
2884 
/**
 * Set the current quantizer and update all derived per-qscale state
 * (chroma qscale and DC scale tables). qscale is clamped to [1, 31].
 * NOTE(review): doxygen line 2899 is missing from this extraction —
 * presumably the matching c_dc_scale table update; confirm in the repo.
 */
2888 void ff_set_qscale(MpegEncContext * s, int qscale)
2889 {
2890  if (qscale < 1)
2891  qscale = 1;
2892  else if (qscale > 31)
2893  qscale = 31;
2894 
2895  s->qscale = qscale;
2896  s->chroma_qscale= s->chroma_qscale_table[qscale];
2897 
2898  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2900 }
2901 
2903 {
2906 }