mpegvideo_motion.c
/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/internal.h"
#include "avcodec.h"
#include "dsputil.h"
#include "h261.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include <limits.h>

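/* Global motion compensation with a single (translational) warp point:
 * s->sprite_offset holds the global displacement in 1/(2^(accuracy+1))-pel
 * units; its integer part selects the source block and the remaining
 * 1/16-pel fraction is interpolated by dsp.gmc1, or by a plain halfpel copy
 * when the fraction is a multiple of 8 in both dimensions. */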
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x = s->sprite_offset[0][0];
    motion_y = s->sprite_offset[0][1];
    src_x    = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y    = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->dsp.gmc1(dest_y, ptr, linesize, 16,
                    motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                    motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    motion_x = s->sprite_offset[1][0];
    motion_y = s->sprite_offset[1][1];
    src_x    = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y    = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

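/* Global motion compensation with an affine warp: sprite_delta is the 2x2
 * transform matrix and (ox, oy) the warped position of the block's top-left
 * corner; dsp.gmc resamples each plane, with the 16x16 luma block done as
 * two 8-pixel-wide halves. */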
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
               ox, oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y + 8, ptr, linesize, 16,
               ox + s->sprite_delta[0][0] * 8,
               oy + s->sprite_delta[1][0] * 8,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
               ox, oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos >> 1, s->v_edge_pos >> 1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
               ox, oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos >> 1, s->v_edge_pos >> 1);
}

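/* Half-pel motion compensation of a single 8x8 luma block; dxy packs the
 * horizontal/vertical half-pel flags into bits 0/1, and edge emulation is
 * used when the vector points outside the decoded picture area.
 * Returns 1 if the edge-emulation buffer was used. */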
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if (s->unrestricted_mv) {
        if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
            (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                     s->linesize, s->linesize,
                                     9, 9,
                                     src_x, src_y, s->h_edge_pos,
                                     s->v_edge_pos);
            src = s->edge_emu_buffer;
            emu = 1;
        }
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

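/* Core half-pel motion compensation for one macroblock (or one field of it):
 * derives the chroma vector and subpel flags according to the output format
 * (H.263, H.261, MPEG-1/2 with 4:2:0/4:2:2/4:4:4 chroma), emulates edges
 * where needed and applies pix_op to the three planes. */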
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

#if 0
    if (s->quarter_sample) {
        motion_x >>= 1;
        motion_y >>= 1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma mv's are full pel in H261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 1) - h, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}
/* apply one mpeg motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

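/* Blend the five 8x8 predictions (mid, top, left, right, bottom) with
 * position-dependent weights; each OBMC_FILTER weight set sums to 8, so the
 * result is renormalized by the final "+ 4 >> 3". */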
// FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID    0
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

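/* Quarter-pel luma motion compensation; the chroma vector is derived at
 * half-pel precision, optionally mimicking known encoder bugs
 * (FF_BUG_QPEL_CHROMA / FF_BUG_QPEL_CHROMA2) for bit-exact playback. */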
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize   << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx  >>= 1;
    my  >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * h263 chroma 4mv motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8X8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy  = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x > FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 8, 0) ||
        (unsigned)src_y > FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 8, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

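/* Overlapped block motion compensation for one macroblock: gather the
 * vectors of the four own 8x8 blocks and of the neighbouring blocks
 * (replicated at picture borders and next to intra blocks) into mv_cache,
 * run obmc_motion() per 8x8 block, and pass the summed vector to
 * chroma_4mv_motion() for the single chroma prediction. */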
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame   = &s->current_picture;
    int mb_x             = s->mb_x;
    int mb_y             = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    assert(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x      = (i & 1) + 1;
        const int y      = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

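/* 4MV mode: each 8x8 luma block has its own vector (quarter- or half-pel);
 * the four vectors are summed and passed to chroma_4mv_motion(), which
 * derives one rounded chroma vector for both chroma planes. */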
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 8, 0) ||
                (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 3) - 8, 0)) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luminance destination pointer
 * @param dest_cb chrominance cb destination pointer
 * @param dest_cr chrominance cr destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default: assert(0);
    }
}

void ff_MPV_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}