/* dsputil_template.c */
1 /*
2  * DSP utils
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "bit_depth_template.c"
31 
32 static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
33 {
34  int i;
35  for(i=0; i<h; i++)
36  {
37  AV_WN2P(dst , AV_RN2P(src ));
38  dst+=dstStride;
39  src+=srcStride;
40  }
41 }
42 
43 static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
44 {
45  int i;
46  for(i=0; i<h; i++)
47  {
48  AV_WN4P(dst , AV_RN4P(src ));
49  dst+=dstStride;
50  src+=srcStride;
51  }
52 }
53 
54 static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
55 {
56  int i;
57  for(i=0; i<h; i++)
58  {
59  AV_WN4P(dst , AV_RN4P(src ));
60  AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
61  dst+=dstStride;
62  src+=srcStride;
63  }
64 }
65 
66 static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
67 {
68  int i;
69  for(i=0; i<h; i++)
70  {
71  AV_WN4P(dst , AV_RN4P(src ));
72  AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
73  AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
74  AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
75  dst+=dstStride;
76  src+=srcStride;
77  }
78 }
79 
80 /* draw the edges of width 'w' of an image of size width, height */
81 //FIXME check that this is ok for mpeg4 interlaced
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
/*
 * Replicates border pixels outward so that motion compensation may read
 * outside the picture: each row's first/last pixel is copied w pixels to
 * the left/right, then (depending on 'sides') the widened top/bottom rows
 * are duplicated h rows upward/downward, which also fills the corners.
 * _wrap is the line stride in bytes; all work is done in 'pixel' units.
 */
static void FUNCC(draw_edges)(uint8_t *_buf, int _wrap, int width, int height, int w, int h, int sides)
{
    pixel *buf = (pixel*)_buf;
    int wrap = _wrap / sizeof(pixel);
    pixel *ptr, *last_line;
    int i;

    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
#if BIT_DEPTH > 8
        /* pixels wider than one byte cannot be replicated with memset */
        int j;
        for (j = 0; j < w; j++) {
            ptr[j-w] = ptr[0];
            ptr[j+width] = ptr[width-1];
        }
#else
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
#endif
        ptr += wrap;
    }

    /* top and bottom + corners */
    buf -= w;   /* step back to include the already-filled left border */
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < h; i++)
            memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < h; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}
115 
/**
 * Copy a block_w x block_h rectangle from src (picture of size w x h,
 * line stride 'linesize' in bytes) into buf, replicating edge pixels
 * for the parts of the block that fall outside the picture.
 * (src_x, src_y) is the block's top-left position in the picture and
 * may be negative or beyond the picture; fully out-of-picture positions
 * are first clamped so at least one source row/column overlaps.
 * NOTE(review): 'linesize' is used for both src and buf — the two
 * buffers are assumed to share a stride.
 */
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                               int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    /* clamp a block lying entirely outside the picture back to its edge */
    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x)*sizeof(pixel);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x)*sizeof(pixel);
        src_x=1-block_w;
    }

    /* sub-rectangle of the block that is covered by real picture data */
    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    assert(start_y < end_y && block_h);
    assert(start_x < end_x && block_w);

    w = end_x - start_x;   /* w now means: width of the valid part */
    src += start_y*linesize + start_x*sizeof(pixel);
    buf += start_x*sizeof(pixel);

    //top
    for(y=0; y<start_y; y++){
        /* src still points at the first valid row: replicate it upward */
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    // copy existing part
    for(; y<end_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        src += linesize;
        buf += linesize;
    }

    //bottom
    src -= linesize;   /* back to the last valid row, replicate it downward */
    for(; y<block_h; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    /* second pass: replicate the left/right columns across every row */
    buf -= block_h * linesize + start_x*sizeof(pixel);
    while (block_h--){
        pixel *bufp = (pixel*)buf;
        //left
        for(x=0; x<start_x; x++){
            bufp[x] = bufp[start_x];
        }

        //right
        for(x=end_x; x<block_w; x++){
            bufp[x] = bufp[end_x - 1];
        }
        buf += linesize;
    }
}
194 
/**
 * DCTELEM_FUNCS(dctcoef, suffix) instantiates the pixel <-> DCT-block
 * transfer helpers for one coefficient width:
 *  - get_pixels:  read an 8x8 block of pixels into a coefficient array
 *  - add_pixels8/add_pixels4: add decoded coefficients back onto pixels
 *  - clear_block/clear_blocks: zero one / six 64-coefficient blocks
 * 'dctcoef' is the coefficient element type and 'suffix' tags the
 * generated function names.
 */
#define DCTELEM_FUNCS(dctcoef, suffix)                                \
static void FUNCC(get_pixels ## suffix)(DCTELEM *restrict _block,     \
                                        const uint8_t *_pixels,      \
                                        int line_size)               \
{                                                                    \
    const pixel *pixels = (const pixel *) _pixels;                   \
    dctcoef *restrict block = (dctcoef *) _block;                    \
    int i;                                                           \
                                                                     \
    /* read the pixels */                                            \
    for(i=0;i<8;i++) {                                               \
        block[0] = pixels[0];                                        \
        block[1] = pixels[1];                                        \
        block[2] = pixels[2];                                        \
        block[3] = pixels[3];                                        \
        block[4] = pixels[4];                                        \
        block[5] = pixels[5];                                        \
        block[6] = pixels[6];                                        \
        block[7] = pixels[7];                                        \
        pixels += line_size / sizeof(pixel);                         \
        block += 8;                                                  \
    }                                                                \
}                                                                    \
                                                                     \
static void FUNCC(add_pixels8 ## suffix)(uint8_t *restrict _pixels,  \
                                         DCTELEM *_block,            \
                                         int line_size)              \
{                                                                    \
    int i;                                                           \
    pixel *restrict pixels = (pixel *restrict)_pixels;               \
    dctcoef *block = (dctcoef*)_block;                               \
    line_size /= sizeof(pixel);                                      \
                                                                     \
    for(i=0;i<8;i++) {                                               \
        pixels[0] += block[0];                                       \
        pixels[1] += block[1];                                       \
        pixels[2] += block[2];                                       \
        pixels[3] += block[3];                                       \
        pixels[4] += block[4];                                       \
        pixels[5] += block[5];                                       \
        pixels[6] += block[6];                                       \
        pixels[7] += block[7];                                       \
        pixels += line_size;                                         \
        block += 8;                                                  \
    }                                                                \
}                                                                    \
                                                                     \
static void FUNCC(add_pixels4 ## suffix)(uint8_t *restrict _pixels,  \
                                         DCTELEM *_block,            \
                                         int line_size)              \
{                                                                    \
    int i;                                                           \
    pixel *restrict pixels = (pixel *restrict)_pixels;               \
    dctcoef *block = (dctcoef*)_block;                               \
    line_size /= sizeof(pixel);                                      \
                                                                     \
    for(i=0;i<4;i++) {                                               \
        pixels[0] += block[0];                                       \
        pixels[1] += block[1];                                       \
        pixels[2] += block[2];                                       \
        pixels[3] += block[3];                                       \
        pixels += line_size;                                         \
        block += 4;                                                  \
    }                                                                \
}                                                                    \
                                                                     \
static void FUNCC(clear_block ## suffix)(DCTELEM *block)             \
{                                                                    \
    memset(block, 0, sizeof(dctcoef)*64);                            \
}                                                                    \
                                                                     \
/* zero all 6 blocks of a macroblock at once */                      \
static void FUNCC(clear_blocks ## suffix)(DCTELEM *blocks)           \
{                                                                    \
    memset(blocks, 0, sizeof(dctcoef)*6*64);                         \
}
273 
275 #if BIT_DEPTH > 8
277 #endif
278 
/**
 * PIXOP2(OPNAME, OP) instantiates the full family of pixel-copy /
 * pixel-average motion-compensation primitives for the current bit depth:
 *  - straight row copies (pixels2/4/8, and pixels16 via CALL_2X_PIXELS)
 *  - two-source averages (_l2) and four-source averages (_l4)
 *  - half-pel interpolators: _x2 (horizontal), _y2 (vertical),
 *    _xy2 (diagonal), each in rounding and no-rounding variants.
 * OP is the store operation: plain assignment for "put", an averaging
 * store for "avg".
 * NOTE(review): the _l4 and _xy2 bodies work on packed bytes through
 * 32-bit masks (0x03030303 / 0xFCFCFCFC) — per the FIXME markers they
 * are only correct for 8-bit pixels.
 */
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel2*)(block)), AV_RN2P(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block)), AV_RN4P(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block)), AV_RN4P(pixels));\
        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
/* a plain copy involves no rounding, so rnd and no_rnd coincide */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                      int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1]);\
        b= AV_RN4P(&src2[i*src_stride2]);\
        OP(*((pixel4*)&dst[i*dst_stride]), no_rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1]);\
        b= AV_RN4P(&src2[i*src_stride2]);\
        OP(*((pixel4*)&dst[i*dst_stride]), rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1]);\
        b= AV_RN4P(&src2[i*src_stride2]);\
        OP(*((pixel4*)&dst[i*dst_stride]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                               int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN2P(&src1[i*src_stride1]);\
        b= AV_RN2P(&src2[i*src_stride2]);\
        OP(*((pixel2*)&dst[i*dst_stride]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _pixels8_l2)(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                       int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
/* half-pel x: average each pixel with its right neighbour */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
/* half-pel y: average each pixel with the one below it */\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH*/\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1= (c&0x03030303UL)\
          + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _pixels8_l4)(dst, src1, src2, src3, src4, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst, src1, src2, src3, src4, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
/* diagonal half-pel for 2-wide blocks, done in plain integer arithmetic */\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *_block, const uint8_t *_pixels, int line_size, int h)\
{\
    int i, a0, b0, a1, b1;\
    pixel *block = (pixel*)_block;\
    const pixel *pixels = (const pixel*)_pixels;\
    line_size /= sizeof(pixel);\
    a0= pixels[0];\
    b0= pixels[1] + 2;\
    a0 += b0;\
    b0 += pixels[2];\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        a1= pixels[0];\
        b1= pixels[1];\
        a1 += b1;\
        b1 += pixels[2];\
\
        block[0]= (a1+a0)>>2; /* FIXME non put */\
        block[1]= (b1+b0)>>2;\
\
        pixels+=line_size;\
        block +=line_size;\
\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        block[0]= (a1+a0)>>2;\
        block[1]= (b1+b0)>>2;\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    const uint32_t a= AV_RN32(pixels);\
    const uint32_t b= AV_RN32(pixels+1);\
    uint32_t l0= (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
    uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
               + ((b&0xFCFCFCFCUL)>>2);\
    uint32_t l1,h1;\
\
    pixels+=line_size;\
    for(i=0; i<h; i+=2){\
        uint32_t a= AV_RN32(pixels);\
        uint32_t b= AV_RN32(pixels+1);\
        l1= (a&0x03030303UL)\
          + (b&0x03030303UL);\
        h1= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
        a= AV_RN32(pixels);\
        b= AV_RN32(pixels+1);\
        l0= (a&0x03030303UL)\
          + (b&0x03030303UL)\
          + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
/* diagonal half-pel for 8-wide blocks: two 4-wide passes (j loop) */\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels);\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels);\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels);\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels);\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0= (a&0x03030303UL)\
                   + (b&0x03030303UL)\
                   + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels);\
            uint32_t b= AV_RN32(pixels+1);\
            l1= (a&0x03030303UL)\
              + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels);\
            b= AV_RN32(pixels+1);\
            l0= (a&0x03030303UL)\
              + (b&0x03030303UL)\
              + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
/* build the 16-wide variants as two side-by-side 8-wide calls */\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16), FUNCC(OPNAME ## _pixels8), 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2), FUNCC(OPNAME ## _pixels8_x2), 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2), FUNCC(OPNAME ## _pixels8_y2), 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16), FUNCC(OPNAME ## _pixels8), 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2), FUNCC(OPNAME ## _no_rnd_pixels8_x2), 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2), FUNCC(OPNAME ## _no_rnd_pixels8_y2), 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
652 
/* store ops for PIXOP2: "avg" averages with the existing pixel (rounded),
 * "put" overwrites it */
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

/* a plain copy has no rounding: alias the no_rnd names to the put versions */
#define put_no_rnd_pixels8_c put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c
663 
664 static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
666 }
667 
668 static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
670 }
671 
/**
 * H264_CHROMA_MC(OPNAME, OP) instantiates the H.264 chroma motion
 * compensation for 2-, 4- and 8-pixel-wide blocks. x and y are the
 * fractional (1/8-pel) offsets in [0,8); A/B/C/D are the resulting
 * bilinear weights of the four neighbouring samples (they sum to 64).
 * When D == 0 the interpolation degenerates to a single axis (or a
 * plain copy), handled by the cheaper E/step path.
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    stride /= sizeof(pixel);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        /* one of x,y is 0: 1-D interpolation along the non-zero axis */\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    stride /= sizeof(pixel);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    stride /= sizeof(pixel);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}
781 
/* store ops for the chroma MC: weights sum to 64, so +32 >> 6 rounds
 * the weighted sum to nearest; avg additionally averages with dst */
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_       , op_put)
H264_CHROMA_MC(avg_       , op_avg)
#undef op_avg
#undef op_put
789 
790 #define H264_LOWPASS(OPNAME, OP, OP2) \
791 static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
792  const int h=2;\
793  INIT_CLIP\
794  int i;\
795  pixel *dst = (pixel*)_dst;\
796  pixel *src = (pixel*)_src;\
797  dstStride /= sizeof(pixel);\
798  srcStride /= sizeof(pixel);\
799  for(i=0; i<h; i++)\
800  {\
801  OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
802  OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
803  dst+=dstStride;\
804  src+=srcStride;\
805  }\
806 }\
807 \
808 static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
809  const int w=2;\
810  INIT_CLIP\
811  int i;\
812  pixel *dst = (pixel*)_dst;\
813  pixel *src = (pixel*)_src;\
814  dstStride /= sizeof(pixel);\
815  srcStride /= sizeof(pixel);\
816  for(i=0; i<w; i++)\
817  {\
818  const int srcB= src[-2*srcStride];\
819  const int srcA= src[-1*srcStride];\
820  const int src0= src[0 *srcStride];\
821  const int src1= src[1 *srcStride];\
822  const int src2= src[2 *srcStride];\
823  const int src3= src[3 *srcStride];\
824  const int src4= src[4 *srcStride];\
825  OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
826  OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
827  dst++;\
828  src++;\
829  }\
830 }\
831 \
832 static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
833  const int h=2;\
834  const int w=2;\
835  const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
836  INIT_CLIP\
837  int i;\
838  pixel *dst = (pixel*)_dst;\
839  pixel *src = (pixel*)_src;\
840  dstStride /= sizeof(pixel);\
841  srcStride /= sizeof(pixel);\
842  src -= 2*srcStride;\
843  for(i=0; i<h+5; i++)\
844  {\
845  tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
846  tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
847  tmp+=tmpStride;\
848  src+=srcStride;\
849  }\
850  tmp -= tmpStride*(h+5-2);\
851  for(i=0; i<w; i++)\
852  {\
853  const int tmpB= tmp[-2*tmpStride] - pad;\
854  const int tmpA= tmp[-1*tmpStride] - pad;\
855  const int tmp0= tmp[0 *tmpStride] - pad;\
856  const int tmp1= tmp[1 *tmpStride] - pad;\
857  const int tmp2= tmp[2 *tmpStride] - pad;\
858  const int tmp3= tmp[3 *tmpStride] - pad;\
859  const int tmp4= tmp[4 *tmpStride] - pad;\
860  OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
861  OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
862  dst++;\
863  tmp++;\
864  }\
865 }\
866 static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
867  const int h=4;\
868  INIT_CLIP\
869  int i;\
870  pixel *dst = (pixel*)_dst;\
871  pixel *src = (pixel*)_src;\
872  dstStride /= sizeof(pixel);\
873  srcStride /= sizeof(pixel);\
874  for(i=0; i<h; i++)\
875  {\
876  OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
877  OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
878  OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
879  OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
880  dst+=dstStride;\
881  src+=srcStride;\
882  }\
883 }\
884 \
885 static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
886  const int w=4;\
887  INIT_CLIP\
888  int i;\
889  pixel *dst = (pixel*)_dst;\
890  pixel *src = (pixel*)_src;\
891  dstStride /= sizeof(pixel);\
892  srcStride /= sizeof(pixel);\
893  for(i=0; i<w; i++)\
894  {\
895  const int srcB= src[-2*srcStride];\
896  const int srcA= src[-1*srcStride];\
897  const int src0= src[0 *srcStride];\
898  const int src1= src[1 *srcStride];\
899  const int src2= src[2 *srcStride];\
900  const int src3= src[3 *srcStride];\
901  const int src4= src[4 *srcStride];\
902  const int src5= src[5 *srcStride];\
903  const int src6= src[6 *srcStride];\
904  OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
905  OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
906  OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
907  OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
908  dst++;\
909  src++;\
910  }\
911 }\
912 \
/* Combined horizontal+vertical (centre half-pel) 6-tap lowpass for a    \
 * 4x4 block.  Pass 1 filters horizontally into the int16_t tmp buffer,  \
 * producing h+5 rows (2 above and 3 below the block) so the vertical    \
 * pass has full 6-tap context; pass 2 filters tmp vertically.  OP2      \
 * (op2_put/op2_avg) performs the (x+512)>>10 rounding, clip and store.  \
 * pad biases the intermediates so they stay representable in the signed \
 * 16-bit tmp buffer at bit depths above 9; it is subtracted again when  \
 * the values are read back in the second pass. */\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
    const int h=4;\
    const int w=4;\
    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    dstStride /= sizeof(pixel);\
    srcStride /= sizeof(pixel);\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    /* rewind to the row of tmp that corresponds to the block's first row \
     * (2 rows into the staged buffer) */\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        dst++;\
        tmp++;\
    }\
}\
953 \
/* Horizontal half-pel 6-tap lowpass over an 8-row block.  Reads 5 extra \
 * samples per row (2 left, 3 right); OP (op_put/op_avg) performs the    \
 * (x+16)>>5 rounding, clipping and store.  Strides arrive in bytes and  \
 * are converted to pixel units below. */\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
    const int h=8;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    dstStride /= sizeof(pixel);\
    srcStride /= sizeof(pixel);\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
976 \
/* Vertical half-pel 6-tap lowpass over an 8-wide block.  Same filter as \
 * the 4-wide variant ([1,-5,20,20,-5,1]); one column is processed per   \
 * outer iteration, loading 13 rows to produce 8 outputs. */\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
    const int w=8;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    dstStride /= sizeof(pixel);\
    srcStride /= sizeof(pixel);\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10=src[10*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
        dst++;\
        src++;\
    }\
}\
1012 \
/* Combined horizontal+vertical (centre half-pel) 6-tap lowpass for an   \
 * 8x8 block.  Pass 1 filters horizontally into the int16_t tmp buffer   \
 * (h+5 rows: 2 above, 3 below the block); pass 2 filters tmp            \
 * vertically.  OP2 performs the (x+512)>>10 rounding, clip and store.   \
 * pad biases intermediates into int16_t range for bit depths above 9    \
 * and is subtracted again when tmp is read back. */\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
    const int h=8;\
    const int w=8;\
    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)_dst;\
    pixel *src = (pixel*)_src;\
    dstStride /= sizeof(pixel);\
    srcStride /= sizeof(pixel);\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    /* rewind to the tmp row matching the block's first source row */\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        const int tmp7= tmp[7 *tmpStride] - pad;\
        const int tmp8= tmp[8 *tmpStride] - pad;\
        const int tmp9= tmp[9 *tmpStride] - pad;\
        const int tmp10=tmp[10*tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
        dst++;\
        tmp++;\
    }\
}\
1065 \
/* 16x16 vertical lowpass assembled from four 8x8 quadrant calls         \
 * (pixel offsets are in bytes, hence the sizeof(pixel) scaling). */\
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
1074 \
/* 16x16 horizontal lowpass assembled from four 8x8 quadrant calls. */\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
1083 \
/* 16x16 centre (hv) lowpass assembled from four 8x8 quadrant calls;     \
 * each quadrant reuses the caller-provided int16_t scratch buffer at    \
 * the matching column offset. */\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\
1092 
/*
 * H264_MC(OPNAME, SIZE) expands to the 16 quarter-pel motion-compensation
 * entry points _mcXY for one block size.  X and Y are the fractional
 * offsets in quarter pels: 0 = integer position, 2 = half-pel (direct
 * 6-tap lowpass), 1/3 = quarter-pel, formed by averaging a half-pel
 * plane with a neighbouring full-pel sample or with another half-pel
 * plane via OPNAME ## pixels ## SIZE ## _l2.
 * Scratch buffers are declared in bytes and scaled by sizeof(pixel) so
 * the same template serves every supported bit depth.
 */
#define H264_MC(OPNAME, SIZE) \
/* (0,0): integer position - plain block copy/average */\
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
    FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
\
/* (1,0): average of the horizontal half-pel plane and the left sample */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
/* (2,0): horizontal half-pel, filtered straight into dst */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
\
/* (3,0): average of the horizontal half-pel plane and the right sample */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
/* (0,1): average of the vertical half-pel plane and the top sample.     \
 * copy_block stages SIZE+5 rows (2 above, 3 below) so the 6-tap         \
 * vertical filter has full context; full_mid is the block's first row. */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (0,2): vertical half-pel */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
\
/* (0,3): average of the vertical half-pel plane and the bottom sample */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (1,1): average of the horizontal and vertical half-pel planes */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (3,1): as mc11, but the vertical plane comes from one column right */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (1,3): as mc11, but the horizontal plane comes from one row down */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (3,3): horizontal plane one row down, vertical plane one column right */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (2,2): centre position - combined horizontal+vertical 6-tap filter */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
\
/* (2,1): average of the horizontal half-pel plane and the centre plane */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (2,3): as mc21, but the horizontal plane comes from one row down */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (1,2): average of the vertical half-pel plane and the centre plane */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
/* (3,2): as mc12, but the vertical plane comes from one column right */\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
1229 
// Store macros plugged into the lowpass templates above:
//  - op_put/op_avg handle single-pass results, which carry a scale of 32,
//    hence the (b+16)>>5 rounding before CLIP;
//  - op2_put/op2_avg handle the two-pass hv results, scale 1024, hence
//    (b+512)>>10.
// The avg variants round-average the filtered value with the existing
// destination pixel.
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)

H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
// Instantiate the quarter-pel MC entry points; note only the put_ variant
// is generated for the 2x2 size.
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)

#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
// The _mc00 (integer-pel) cases are plain block copies/averages, so alias
// them to the shared ff_{put,avg}_pixels wrappers below for each supported
// bit depth instead of using the template-generated versions.
#if BIT_DEPTH == 8
# define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
# define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
# define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
# define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
# define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
# define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
# define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
# define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
# define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
# define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
# define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
# define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
#endif
1267 
/* Copy an 8x8 pixel block: fixed-size wrapper around put_pixels8. */
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(put_pixels8)(dst, src, stride, 8);
}
/* Average an 8x8 pixel block into dst: fixed-size wrapper around avg_pixels8. */
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(avg_pixels8)(dst, src, stride, 8);
}
/* Copy a 16x16 pixel block: fixed-size wrapper around put_pixels16. */
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(put_pixels16)(dst, src, stride, 16);
}
/* Average a 16x16 pixel block into dst: fixed-size wrapper around avg_pixels16. */
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
    FUNCC(avg_pixels16)(dst, src, stride, 16);
}