dsputil_altivec.c
/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v:  pix1[0] - pix1[15]
           pix2v:  pix2[0] - pix2[15]   pix2iv: pix2[1] - pix2[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

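/* A minimal scalar sketch of what sad16_x2_altivec computes, for
 * illustration only (not part of the original file): the SAD of pix1
 * against the rounded-up horizontal average of pix2 and pix2 + 1.
 * vec_avg performs the same (a + b + 1) >> 1 per-byte average in one
 * instruction. */
#if 0
static int sad16_x2_scalar_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif
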
static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Because pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next iteration. We can use this fact to avoid a
       potentially expensive unaligned read each time around the loop.
       Read unaligned pixels into our vector. The vector is as follows:
       pix2v: pix2[0] - pix2[15] */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0] - pix1[15]
           pix3v: pix3[0] - pix3[15] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Because pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next iteration. We can use this fact to avoid a
       potentially expensive unaligned read, as well as some splitting and
       vector addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0] - pix2[15]   pix2iv: pix2[1] - pix2[16]
       Split the pixel vectors into shorts. */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0] - pix1[15]
           pix3v: pix3[0] - pix3[15]   pix3iv: pix3[1] - pix3[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
           rounding would mean that, for example, avg(3, 0, 0, 1) = 2, when
           it should be 1. Instead, we have to split the pixel vectors into
           vectors of shorts and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

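/* Scalar sketch of the rounding implemented above, for illustration only
 * (not part of the original file): each reference pixel is the average of
 * a 2x2 neighbourhood rounded as (a + b + c + d + 2) >> 2, which is what
 * the widened adds plus the vec_sr by two compute. */
#if 0
static int sad16_xy2_scalar_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {
            int avg = (pix2[j]             + pix2[j + 1] +
                       pix2[j + line_size] + pix2[j + line_size + 1] + 2) >> 2;
            s += FFABS(pix1[j] - avg);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif
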
static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, pix1v_low, pix1v_high, pix2v_low, pix2v_high;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v_high = vec_ld( 0, pix1);
        pix1v_low  = vec_ld(15, pix1);
        perm2 = vec_lvsl(0, pix2);
        pix2v_high = vec_ld( 0, pix2);
        pix2v_low  = vec_ld(15, pix2);
        t1 = vec_perm(pix1v_high, pix1v_low, perm1);
        t2 = vec_perm(pix2v_high, pix2v_low, perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

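/* Scalar view of the reduction idiom used throughout this file, for
 * illustration only (not part of the original file): vec_sum4s folds each
 * group of 4 absolute byte differences into one of 4 partial word sums,
 * and vec_sums then folds those partials into element 3 of the vector,
 * which vec_splat/vec_ste extract into the scalar s. */
#if 0
static int sad16_scalar_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - pix2[j]);
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif
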
static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

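/* Scalar sketch of pix_norm1_altivec, for illustration only (not part of
 * the original file): the sum of squares of a 16x16 block. vec_msum
 * multiplies each byte by itself and accumulates groups of 4 products
 * into word sums. */
#if 0
static int pix_norm1_scalar_sketch(uint8_t *pix, int line_size)
{
    int i, j, s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            s += pix[j] * pix[j];
        pix += line_size;
    }
    return s;
}
#endif
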
/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

static int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i * 16, (vector signed short *)block);

        pixels += line_size;
    }
}

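/* Scalar sketch of the widening above, for illustration only (not part of
 * the original file): on big-endian AltiVec, vec_mergeh(zero, bytes)
 * interleaves a zero byte in front of each pixel byte, i.e. it
 * zero-extends the first 8 pixels to 16-bit DCT coefficients. */
#if 0
static void get_pixels_scalar_sketch(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[i * 8 + j] = pixels[j];
        pixels += line_size;
    }
}
#endif
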
static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short *)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short *)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void clear_block_altivec(DCTELEM *block)
{
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16, add (not copy) the tail bytes */
    for (; i < w; i++) {
        dst[i] += src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    // hand-unrolling the loop by 4 gains about 15%;
    // minimum execution time goes from 74 to 60 cycles.
    // it's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad (74 cycles again).
    // all this is on a 7450, tuning for the 7450.
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv   = vec_ld( 0, block);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, perm);
        blockv   = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv   = vec_ld( 0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0, 1, s0, s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block  += line_size;
    }
}
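
/* Sketch of the "rightside" read-modify-write used above and in the xy2
 * functions below, for illustration only (not part of the original file):
 * AltiVec can only store aligned 16-byte vectors, so an 8-byte-aligned
 * destination is updated by loading the enclosing 16-byte line, replacing
 * its low or high half, and storing the whole line back. The vec_perm with
 * vcprm does that half-replacement in registers. */
#if 0
static void store8_within_aligned16_sketch(uint8_t *block, const uint8_t result[8])
{
    uint8_t *line  = (uint8_t *)((unsigned long)block & ~15UL);
    int     offset = (unsigned long)block & 0x0000000F; /* 0 (left) or 8 (right) */
    int     j;
    for (j = 0; j < 8; j++)
        line[offset + j] = result[j];
}
#endif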

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
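
/* Scalar sketch of put_pixels8_xy2_altivec, for illustration only (not part
 * of the original file): each output pixel is the +2-rounded average of a
 * 2x2 input neighbourhood. The no-rounding variant below adds vcone (+1)
 * instead of vctwo (+2) before the shift. */
#if 0
static void put_pixels8_xy2_scalar_sketch(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            block[j] = (pixels[j]             + pixels[j + 1] +
                        pixels[j + line_size] + pixels[j + line_size + 1] + 2) >> 2;
        block  += line_size;
        pixels += line_size;
    }
}
#endif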

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
                                   pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
                                   pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
        register const vector signed short vprod1 = (const vector signed short)
            { 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 = (const vector signed short)
            { 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 = (const vector signed short)
            { 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 = (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 = (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 = (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res) \
    { \
        register vector unsigned char src1, src2, srcO; \
        register vector unsigned char dst1, dst2, dstO; \
        register vector signed short srcV, dstV; \
        register vector signed short but0, but1, but2, op1, op2, op3; \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 15, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 15, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        /* we're in the 8x8 function; we only care about the first 8 */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0 = vec_sub(srcV, dstV); \
        op1  = vec_perm(but0, but0, perm1); \
        but1 = vec_mladd(but0, vprod1, op1); \
        op2  = vec_perm(but1, but1, perm2); \
        but2 = vec_mladd(but1, vprod2, op2); \
        op3  = vec_perm(but2, but2, perm3); \
        res  = vec_mladd(but2, vprod3, op3); \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}
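
/* Scalar sketch of one horizontal butterfly stage of ONEITERBUTTERFLY, for
 * illustration only (not part of the original file): each vec_perm pairs
 * every element with a partner at distance 1, 2 or 4, and vec_mladd with
 * the +/-1 vprod constants produces (a + b) in one slot and the difference
 * in the other, so the three stages form an 8-point Hadamard transform of
 * each row of src - dst differences. */
#if 0
static void hadamard8_1d_sketch(short x[8])
{
    int dist, i;
    for (dist = 1; dist <= 4; dist <<= 1) { /* perm1/vprod1, perm2/vprod2, perm3/vprod3 */
        short t[8];
        for (i = 0; i < 8; i++) {
            int partner = i ^ dist;                        /* element vec_perm swaps in */
            t[i] = (i & dist) ? (short)(x[partner] - x[i]) /* -1 lane of vprod */
                              : (short)(x[i] + x[partner]);/* +1 lane of vprod */
        }
        for (i = 0; i < 8; i++)
            x[i] = t[i];
    }
}
#endif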

/*
16x8 works with 16 elements; it avoids replicating loads and gives the
compiler more room for scheduling. It's only used from inside
hadamard8_diff16_altivec.

Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has a
LOT of spill code; it seems gcc (unlike xlc) cannot keep everything in
registers by itself. The following code includes hand-made register
allocation. It's not clean, but on a 7450 the resulting code is much faster
(the best case falls from 700+ cycles to 550).

xlc doesn't add spill code, but it doesn't know how to schedule for the
7450, and its code isn't much faster than gcc-3.3's on the 7450 (but uses
25% fewer instructions...).

On the 970, the hand-made RA is still a win (around 690 vs. around 780),
but xlc gets to around 660 on the regular C code...
*/

static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
        register const vector signed short vprod1 __asm__ ("v16") =
            (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 __asm__ ("v17") =
            (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 __asm__ ("v18") =
            (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 __asm__ ("v19") =
            (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 __asm__ ("v20") =
            (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 __asm__ ("v21") =
            (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
        register vector unsigned char src1 __asm__ ("v22"), \
                                      src2 __asm__ ("v23"), \
                                      dst1 __asm__ ("v24"), \
                                      dst2 __asm__ ("v25"), \
                                      srcO __asm__ ("v22"), \
                                      dstO __asm__ ("v23"); \
        \
        register vector signed short srcV  __asm__ ("v24"), \
                                     dstV  __asm__ ("v25"), \
                                     srcW  __asm__ ("v26"), \
                                     dstW  __asm__ ("v27"), \
                                     but0  __asm__ ("v28"), \
                                     but0S __asm__ ("v29"), \
                                     op1   __asm__ ("v30"), \
                                     but1  __asm__ ("v22"), \
                                     op1S  __asm__ ("v23"), \
                                     but1S __asm__ ("v24"), \
                                     op2   __asm__ ("v25"), \
                                     but2  __asm__ ("v26"), \
                                     op2S  __asm__ ("v27"), \
                                     but2S __asm__ ("v28"), \
                                     op3   __asm__ ("v29"), \
                                     op3S  __asm__ ("v30"); \
        \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 16, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 16, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)dstO); \
        srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
               (vector signed char)srcO); \
        dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0  = vec_sub(srcV, dstV); \
        but0S = vec_sub(srcW, dstW); \
        op1   = vec_perm(but0, but0, perm1); \
        but1  = vec_mladd(but0, vprod1, op1); \
        op1S  = vec_perm(but0S, but0S, perm1); \
        but1S = vec_mladd(but0S, vprod1, op1S); \
        op2   = vec_perm(but1, but1, perm2); \
        but2  = vec_mladd(but1, vprod2, op2); \
        op2S  = vec_perm(but1S, but1S, perm2); \
        but2S = vec_mladd(but1S, vprod2, op2S); \
        op3   = vec_perm(but2, but2, perm3); \
        res1  = vec_mladd(but2, vprod3, op3); \
        op3S  = vec_perm(but2S, but2S, perm3); \
        res2  = vec_mladd(but2S, vprod3, op3S); \
    }
        ONEITERBUTTERFLY(0, temp0, temp0S);
        ONEITERBUTTERFLY(1, temp1, temp1S);
        ONEITERBUTTERFLY(2, temp2, temp2S);
        ONEITERBUTTERFLY(3, temp3, temp3S);
        ONEITERBUTTERFLY(4, temp4, temp4S);
        ONEITERBUTTERFLY(5, temp5, temp5S);
        ONEITERBUTTERFLY(6, temp6, temp6S);
        ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0S, line1S, line2S, line3S, line4S,
                                     line5S, line6S, line7S, line0BS, line2BS,
                                     line1BS, line3BS, line4BS, line6BS, line5BS,
                                     line7BS, line0CS, line4CS, line1CS, line5CS,
                                     line2CS, line6CS, line3CS, line7CS;

        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);

        line0S = vec_add(temp0S, temp1S);
        line1S = vec_sub(temp0S, temp1S);
        line2S = vec_add(temp2S, temp3S);
        line3S = vec_sub(temp2S, temp3S);
        line4S = vec_add(temp4S, temp5S);
        line5S = vec_sub(temp4S, temp5S);
        line6S = vec_add(temp6S, temp7S);
        line7S = vec_sub(temp6S, temp7S);

        line0BS = vec_add(line0S, line2S);
        line2BS = vec_sub(line0S, line2S);
        line1BS = vec_add(line1S, line3S);
        line3BS = vec_sub(line1S, line3S);
        line4BS = vec_add(line4S, line6S);
        line6BS = vec_sub(line4S, line6S);
        line5BS = vec_add(line5S, line7S);
        line7BS = vec_sub(line5S, line7S);

        line0CS = vec_add(line0BS, line4BS);
        line4CS = vec_sub(line0BS, line4BS);
        line1CS = vec_add(line1BS, line5BS);
        line5CS = vec_sub(line1BS, line5BS);
        line2CS = vec_add(line2BS, line6BS);
        line6CS = vec_sub(line2BS, line6BS);
        line3CS = vec_add(line3BS, line7BS);
        line7CS = vec_sub(line3BS, line7BS);

        vsum = vec_sum4s(vec_abs(line0CS), vsum);
        vsum = vec_sum4s(vec_abs(line1CS), vsum);
        vsum = vec_sum4s(vec_abs(line2CS), vsum);
        vsum = vec_sum4s(vec_abs(line3CS), vsum);
        vsum = vec_sum4s(vec_abs(line4CS), vsum);
        vsum = vec_sum4s(vec_abs(line5CS), vsum);
        vsum = vec_sum4s(vec_abs(line6CS), vsum);
        vsum = vec_sum4s(vec_abs(line7CS), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int score;
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}

static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    const vector unsigned int v_31 = //XXX
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag + i);
        a = vec_ld(0, ang + i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang + i);
        vec_stl(m, 0, mag + i);
    }
}

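/* Scalar reference for the branch-free vector code above, for illustration
 * only (a sketch mirroring the generic C decoder logic, not part of the
 * original file): */
#if 0
static void vorbis_inverse_coupling_sketch(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] += temp;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] -= temp;
            }
        }
    }
}
#endif
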
/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
1373 
1375 {
1376  const int high_bit_depth = avctx->bits_per_raw_sample > 8;
1377 
1378  c->pix_abs[0][1] = sad16_x2_altivec;
1379  c->pix_abs[0][2] = sad16_y2_altivec;
1380  c->pix_abs[0][3] = sad16_xy2_altivec;
1381  c->pix_abs[0][0] = sad16_altivec;
1382  c->pix_abs[1][0] = sad8_altivec;
1383  c->sad[0]= sad16_altivec;
1384  c->sad[1]= sad8_altivec;
1386  c->sse[1]= sse8_altivec;
1387  c->sse[0]= sse16_altivec;
1388  c->pix_sum = pix_sum_altivec;
1391  if (!high_bit_depth) {
1395  /* the two functions do the same thing, so use the same code */
1404  }
1405 
1410 }