h264_altivec.c
/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

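/* The qpel/chroma template below is instantiated twice: once with the "put"
 * store operation and once with the "avg" operation, which averages the
 * result with what is already in the destination. */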
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

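/* H264_MC generates the 16 quarter-pel motion-compensation entry points for
 * one block size.  The mcXY suffix gives the fractional position: X is the
 * horizontal and Y the vertical offset in quarter pixels.  Half-pel positions
 * come straight from the lowpass filters; the remaining positions are built
 * by averaging two intermediate planes with the _l2 helpers. */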
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

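/* Average two 16-byte-wide sources into dst, one row per iteration.  src1 and
 * dst may be unaligned, so loads go through vec_lvsl/vec_perm and the store is
 * a read-modify-write of the two vectors covering the destination row.  src2
 * is the half-pel scratch buffer and is read with a fixed stride of 16. */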
static inline void put_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
 */

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \
    /* 1st stage */ \
    vz0 = vec_add(vb0,vb2);              /* temp[0] = Y[0] + Y[2] */ \
    vz1 = vec_sub(vb0,vb2);              /* temp[1] = Y[0] - Y[2] */ \
    vz2 = vec_sra(vb1,vec_splat_u16(1)); \
    vz2 = vec_sub(vz2,vb3);              /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1)); \
    vz3 = vec_add(vb1,vz3);              /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */ \
    va0 = vec_add(vz0,vz3);              /* x[0] = temp[0] + temp[3] */ \
    va1 = vec_add(vz1,vz2);              /* x[1] = temp[1] + temp[2] */ \
    va2 = vec_sub(vz1,vz2);              /* x[2] = temp[1] - temp[2] */ \
    va3 = vec_sub(vz0,vz3)               /* x[3] = temp[0] - temp[3] */

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \
    vdst_orig = vec_ld(0, dst); \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss); \
    va_u8 = vec_packsu(va, zero_s16v); \
    va_u32 = vec_splat((vec_u32)va_u8, 0); \
    vec_ste(va_u32, element, (uint32_t*)dst);

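/* 4x4 inverse transform and add.  Adding 32 to the DC coefficient provides
 * the rounding for the final >>6; the 1-D transform is applied once per
 * direction with a transpose in between, and the result is added to the
 * prediction in dst and clamped by the saturating pack in the store macro. */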
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

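/* Add (idctv >> 6) to eight destination pixels with unsigned saturation.  The
 * load and store are unaligned; the sel mask makes the read-modify-write store
 * leave the bytes outside the 8-pixel span untouched. */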
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */ \
    vec_u8 hv = vec_ld( 0, dest ); \
    vec_u8 lv = vec_ld( 7, dest ); \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv ); \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv); \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv); \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16); \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum); \
    vec_u8 edgehv; \
    /* unaligned store */ \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
    lv = vec_sel( lv, bodyv, edgelv ); \
    vec_st( lv, 7, dest ); \
    hv = vec_ld( 0, dest ); \
    edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
    hv = vec_sel( hv, bodyv, edgehv ); \
    vec_st( hv, 0, dest ); \
}

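/* 8x8 inverse transform and add: same structure as the 4x4 version, with the
 * row and column passes done by IDCT8_1D_ALTIVEC and a full 8x8 transpose in
 * between. */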
static void ff_h264_idct8_add_altivec(uint8_t *dst, DCTELEM *dct, int stride) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32;  // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

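/* DC-only inverse transform: the single rounded DC value is splatted across a
 * vector and applied to the whole block.  Because vec_adds/vec_subs operate on
 * unsigned bytes, a positive copy (dcplus) is added and a negated copy
 * (dcminus) subtracted; together they emulate a signed, clamped add. */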
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

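/* The add16/add16intra/add4/add8 wrappers walk the block_offset table and,
 * per block, pick either the DC-only or the full IDCT path based on the
 * non-zero-coefficient counts in nnzc[]. */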
static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                ff_h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}

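/* Deblocking (loop filter) helpers.  The horizontal luma filter works on
 * transposed data: readAndTranspose16x6 (see below) gathers the six pixel
 * columns around the edge into row vectors, the filter then runs exactly as
 * in the vertical case, and transpose4x16/write16x4 scatter the four modified
 * columns back into the frame. */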
#define transpose4x16(r0, r1, r2, r3) { \
    register vec_u8 r4; \
    register vec_u8 r5; \
    register vec_u8 r6; \
    register vec_u8 r7; \
    \
    r4 = vec_mergeh(r0, r2); /* 0, 2 set 0 */ \
    r5 = vec_mergel(r0, r2); /* 0, 2 set 1 */ \
    r6 = vec_mergeh(r1, r3); /* 1, 3 set 0 */ \
    r7 = vec_mergel(r1, r3); /* 1, 3 set 1 */ \
    \
    r0 = vec_mergeh(r4, r6); /* all set 0 */ \
    r1 = vec_mergel(r4, r6); /* all set 1 */ \
    r2 = vec_mergeh(r5, r7); /* all set 2 */ \
    r3 = vec_mergel(r5, r7); /* all set 3 */ \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

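/* Read 16 unaligned rows starting at src and transpose them; only the first
 * six columns of the transposed block are fully computed (r8..r13), which is
 * all the luma edge filter needs. */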
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src); \
    register vec_u8 r1  = unaligned_load(   src_stride, src); \
    register vec_u8 r2  = unaligned_load(2* src_stride, src); \
    register vec_u8 r3  = unaligned_load(3* src_stride, src); \
    register vec_u8 r4  = unaligned_load(4* src_stride, src); \
    register vec_u8 r5  = unaligned_load(5* src_stride, src); \
    register vec_u8 r6  = unaligned_load(6* src_stride, src); \
    register vec_u8 r7  = unaligned_load(7* src_stride, src); \
    register vec_u8 r14 = unaligned_load(14*src_stride, src); \
    register vec_u8 r15 = unaligned_load(15*src_stride, src); \
    \
    r8  = unaligned_load( 8*src_stride, src); \
    r9  = unaligned_load( 9*src_stride, src); \
    r10 = unaligned_load(10*src_stride, src); \
    r11 = unaligned_load(11*src_stride, src); \
    r12 = unaligned_load(12*src_stride, src); \
    r13 = unaligned_load(13*src_stride, src); \
    \
    /* Merge first pairs */ \
    r0 = vec_mergeh(r0, r8);  /* 0, 8 */ \
    r1 = vec_mergeh(r1, r9);  /* 1, 9 */ \
    r2 = vec_mergeh(r2, r10); /* 2,10 */ \
    r3 = vec_mergeh(r3, r11); /* 3,11 */ \
    r4 = vec_mergeh(r4, r12); /* 4,12 */ \
    r5 = vec_mergeh(r5, r13); /* 5,13 */ \
    r6 = vec_mergeh(r6, r14); /* 6,14 */ \
    r7 = vec_mergeh(r7, r15); /* 7,15 */ \
    \
    /* Merge second pairs */ \
    r8  = vec_mergeh(r0, r4); /* 0,4, 8,12 set 0 */ \
    r9  = vec_mergel(r0, r4); /* 0,4, 8,12 set 1 */ \
    r10 = vec_mergeh(r1, r5); /* 1,5, 9,13 set 0 */ \
    r11 = vec_mergel(r1, r5); /* 1,5, 9,13 set 1 */ \
    r12 = vec_mergeh(r2, r6); /* 2,6,10,14 set 0 */ \
    r13 = vec_mergel(r2, r6); /* 2,6,10,14 set 1 */ \
    r14 = vec_mergeh(r3, r7); /* 3,7,11,15 set 0 */ \
    r15 = vec_mergel(r3, r7); /* 3,7,11,15 set 1 */ \
    \
    /* Third merge */ \
    r0 = vec_mergeh(r8, r12);  /* 0,2,4,6,8,10,12,14 set 0 */ \
    r1 = vec_mergel(r8, r12);  /* 0,2,4,6,8,10,12,14 set 1 */ \
    r2 = vec_mergeh(r9, r13);  /* 0,2,4,6,8,10,12,14 set 2 */ \
    r4 = vec_mergeh(r10, r14); /* 1,3,5,7,9,11,13,15 set 0 */ \
    r5 = vec_mergel(r10, r14); /* 1,3,5,7,9,11,13,15 set 1 */ \
    r6 = vec_mergeh(r11, r15); /* 1,3,5,7,9,11,13,15 set 2 */ \
    /* Don't need to compute 3 and 7 */ \
    \
    /* Final merge */ \
    r8  = vec_mergeh(r0, r4); /* all set 0 */ \
    r9  = vec_mergel(r0, r4); /* all set 1 */ \
    r10 = vec_mergeh(r1, r5); /* all set 2 */ \
    r11 = vec_mergel(r1, r5); /* all set 3 */ \
    r12 = vec_mergeh(r2, r6); /* all set 4 */ \
    r13 = vec_mergel(r2, r6); /* all set 5 */ \
    /* Don't need to compute 14 and 15 */ \
    \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

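/* In-place update of p0 and q0.  The signed filter delta is computed with
 * unsigned saturating arithmetic: vec_avg against bitwise complements (vec_nor
 * of a value with itself) yields biased half-differences, the constant
 * 160 (10 << 4) cancels the bias, and the result is clipped against the
 * masked tc0 before being added to / subtracted from p0 and q0. */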
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \
    \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
    \
    register vec_u8 pq0bit = vec_xor(p0,q0); \
    register vec_u8 q1minus; \
    register vec_u8 p0minus; \
    register vec_u8 stage1; \
    register vec_u8 stage2; \
    register vec_u8 vec160; \
    register vec_u8 delta; \
    register vec_u8 deltaneg; \
    \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */ \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */ \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */ \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */ \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \
    stage2 = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);        /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \
    vec160 = vec_ld(0, &A0v); \
    deltaneg = vec_subs(vec160, stage2);      /* -d */ \
    delta = vec_subs(stage2, vec160);         /* d */ \
    deltaneg = vec_min(tc0masked, deltaneg); \
    delta = vec_min(tc0masked, delta); \
    p0 = vec_subs(p0, deltaneg); \
    q0 = vec_subs(q0, delta); \
    p0 = vec_adds(p0, delta); \
    q0 = vec_adds(q0, deltaneg); \
}

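/* Filter one 16-pixel luma edge: build the alpha/beta activity mask, replicate
 * each of the four tc0 values across its 4-pixel segment, conditionally
 * replace p1/q1 (bumping tc by one where that happens), and finally update
 * p0/q0 with the clipped delta. */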
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED(16, unsigned char, temp)[16]; \
    register vec_u8 alphavec; \
    register vec_u8 betavec; \
    register vec_u8 mask; \
    register vec_u8 p1mask; \
    register vec_u8 q1mask; \
    register vector signed char tc0vec; \
    register vec_u8 finaltc0; \
    register vec_u8 tc0masked; \
    register vec_u8 newp1; \
    register vec_u8 newq1; \
    \
    temp[0] = alpha; \
    temp[1] = beta; \
    alphavec = vec_ld(0, temp); \
    betavec = vec_splat(alphavec, 0x1); \
    alphavec = vec_splat(alphavec, 0x0); \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */ \
    \
    *((int *)temp) = *((int *)tc0); \
    tc0vec = vec_ld(0, (signed char*)temp); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); /* if tc0[i] >= 0 */ \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                  /* tc = tc0 */ \
    \
    p1mask = diff_lt_altivec(p2, p0, betavec); \
    p1mask = vec_and(p1mask, mask);               /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec); \
    finaltc0 = vec_sub(finaltc0, p1mask);         /* tc++ */ \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
    /* end if */ \
    \
    q1mask = diff_lt_altivec(q2, q0, betavec); \
    q1mask = vec_and(q1mask, mask);               /* if ( |q2 - q0| < beta ) */ \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec); \
    finaltc0 = vec_sub(finaltc0, q1mask);         /* tc++ */ \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
    /* end if */ \
    \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \
    p1 = newp1; \
    q1 = newq1; \
}

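/* Entry points: the whole edge is skipped only when all four tc0 values are
 * negative (their bitwise AND is negative exactly in that case); per-segment
 * disabling is handled by the masks inside the filter macro. */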
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

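/* Weighted prediction.  The offset is pre-scaled by log2_denom and given the
 * rounding term, so each pixel becomes (pix * weight + offset) >> log2_denom,
 * computed on 16-bit lanes and repacked with unsigned saturation.  The
 * "aligned" test lets the 8-pixel case touch only the half of the vector that
 * actually belongs to the block. */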
static av_always_inline
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

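/* Bidirectional weighted prediction: dst = (dst * weightd + src * weights +
 * bias) >> (log2_denom + 1), where bias = ((offset + 1) | 1) << log2_denom
 * folds the rounding term and the signalled offset into one constant. */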
static av_always_inline
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W) \
static void ff_weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                   int log2_denom, int weight, int offset){ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
}\
static void ff_biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                     int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)

void dsputil_h264_init_ppc(DSPContext *c, AVCodecContext *avctx)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
    if (!high_bit_depth) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
    }
    }
}

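/* H264DSPContext initialisation: the IDCT, loop-filter and weighted-prediction
 * pointers are only installed for 8-bit content when AltiVec is available at
 * runtime. */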
void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
{
    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
    if (bit_depth == 8) {
        c->h264_idct_add = ff_h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = ff_h264_idct_add8_altivec;
        c->h264_idct_add16 = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels8_altivec;
    }
    }
}