mpegvideo_motion.c
/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/internal.h"
#include "avcodec.h"
#include "h261.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include <limits.h>

static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x   = s->sprite_offset[0][0];
    motion_y   = s->sprite_offset[0][1];
    src_x      = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    motion_x   = s->sprite_offset[1][0];
    motion_y   = s->sprite_offset[1][1];
    src_x      = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y      = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x      = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}
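
/*
 * Illustrative sketch (not part of the original file): how gmc1_motion() above
 * splits the luma sprite offset into an integer block position and a 1/16-pel
 * fraction for s->mdsp.gmc1().  The accuracy and offset values are made up.
 */
#if 0
static int gmc1_offset_example(void)
{
    const int accuracy = 3;                         /* s->sprite_warping_accuracy        */
    const int offset   = 37;                        /* s->sprite_offset[0][0], 1/16-pel
                                                     * units when accuracy == 3          */
    int int_part = offset >> (accuracy + 1);        /* 37 >> 4 = 2 full pixels           */
    int frac     = (offset << (3 - accuracy)) & 15; /* 37 & 15 = 5, i.e. 5/16 pel        */

    /* 2 * 16 + 5 == 37: the two parts recombine to the original offset; the
     * fraction is what gmc1() interpolates with (motion_x & 15 above). */
    return int_part * 16 + frac;
}
#endif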

static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ox + s->sprite_delta[0][0] * 8,
                oy + s->sprite_delta[1][0] * 8,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos >> 1, s->v_edge_pos >> 1);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos >> 1, s->v_edge_pos >> 1);
}

static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
        (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}
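
/*
 * Illustrative sketch (not part of the original file): the dxy index built in
 * hpel_motion() packs the two half-pel flags into the pix_op[] selector
 * (bit 0 = horizontal half pel, bit 1 = vertical half pel).  The MV values
 * below are made up.
 */
#if 0
static int hpel_dxy_example(void)
{
    int motion_x = 3, motion_y = -1;   /* half-pel MV, i.e. (+1.5, -0.5) pixels */
    int dxy;

    dxy  = motion_x & 1;               /* 1: horizontal half-pel interpolation  */
    dxy |= (motion_y & 1) << 1;        /* 2: vertical half-pel interpolation    */
    /* dxy == 3 selects the interpolator averaging all four neighbours, while
     * motion_x >> 1 and motion_y >> 1 give the integer source displacement. */
    return dxy;
}
#endif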

static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

#if 0
    if (s->quarter_sample) {
        motion_x >>= 1;
        motion_y >>= 1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx      = (motion_x >> 1) | (motion_x & 1);
            my      = motion_y >> 1;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma MVs are full pel in H.261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvdxy   = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvdxy   = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy   = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 15, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}
/* apply one mpeg motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

// FIXME: SIMDify, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x] = (t * top[x] + l * left[x] + m * mid[x] + r * right[x] + b * bottom[x] + 4) >> 3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x             , t, l, m, r, b);\
    OBMC_FILTER(x + 1         , t, l, m, r, b);\
    OBMC_FILTER(x     + stride, t, l, m, r, b);\
    OBMC_FILTER(x + 1 + stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}
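
/*
 * Illustrative note (not part of the original file): in every OBMC_FILTER()
 * call above the five weights sum to 8, so together with the "+ 4" bias the
 * ">> 3" is a round-to-nearest weighted average of the overlapping
 * predictions.  A minimal sketch with made-up sample values:
 */
#if 0
static int obmc_weight_example(void)
{
    int top = 100, left = 120, mid = 80;   /* predictions overlapping one pixel */

    /* top-left corner of the 8x8 block uses weights 2, 2, 4, 0, 0 (sum 8) */
    return (2 * top + 2 * left + 4 * mid + 4) >> 3;   /* 764 >> 3 = 95 */
}
#endif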

/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID 0
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx  >>= 1;
    my  >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 15, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
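
/*
 * Illustrative sketch (not part of the original file): the default chroma MV
 * derivation in qpel_motion() halves the quarter-pel luma vector and then
 * folds any remaining fraction into a single half-pel flag.  The MV value is
 * made up.
 */
#if 0
static int qpel_chroma_example(void)
{
    int motion_x = 7;                /* quarter-pel luma MV, i.e. 1.75 pixels    */
    int mx, uvdxy_h, mx_int;

    mx      = motion_x / 2;          /* 3: chroma displacement, quarter-pel      */
    mx      = (mx >> 1) | (mx & 1);  /* 1: any nonzero fraction -> half pel      */
    uvdxy_h = mx & 1;                /* 1: horizontal half-pel interpolation     */
    mx_int  = mx >> 1;               /* 0: integer chroma offset                 */
    return (mx_int << 1) | uvdxy_h;
}
#endif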

/**
 * H.263 chroma 4MV motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In case of 8X8, we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr    = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
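
/*
 * Illustrative sketch (not part of the original file): chroma_4mv_motion()
 * receives mx/my as the *sum* of the four 8x8 luma MVs (see apply_8x8() and
 * apply_obmc() below); ff_h263_round_chroma() maps that sum to one chroma MV
 * using the H.263 rounding table, and its low bit then becomes the half-pel
 * flag in dxy.  The helper below is hypothetical.
 */
#if 0
static int chroma_4mv_dxy_example(int rounded_mx, int rounded_my)
{
    /* rounded_mx/my: values already passed through ff_h263_round_chroma() */
    int dxy = ((rounded_my & 1) << 1) | (rounded_mx & 1); /* half-pel flags */

    /* rounded_mx >> 1 and rounded_my >> 1 are the integer chroma offsets
     * added to mb_x * 8 / mb_y * 8 in the function above. */
    return dxy;
}
#endif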

static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64 byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx    = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my    = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off         = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame   = &s->current_picture;
    int mb_x             = s->mb_x;
    int mb_y             = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    assert(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x      = (i & 1) + 1;
        const int y      = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}

void ff_mpv_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}