vc1dsp_altivec.c
/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/vc1dsp.h"

#include "util_altivec.h"
#include "dsputil_altivec.h"

// Main steps of the 8-point VC-1 inverse transform, implemented with
// shift-and-add multiplies (even-part coefficients 12, 16 and 6; odd-part
// coefficient matrix {16, 15, 9, 4})
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    /* even part: t0/t1 = 12 * (s0 +/- s4) + rounding */ \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    /* t2 = 16 * s2 + 6 * s6, t3 = 6 * s2 - 16 * s6 */ \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    /* odd part: the four rows of the {16, 15, 9, 4} coefficient matrix */ \
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    /* final butterfly of even and odd parts */ \
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

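// Rounding shifts between the two transform passes: SHIFT_HOR8 is the
// >> 3 of the first pass, SHIFT_VERT8 the >> 7 of the second pass, which
// first adds 1 to s4..s7 as VC-1's rounding rule requires for half of
// the outputs.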
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

/* Main steps of the 4-point VC-1 inverse transform, with coefficients 17, 22 and 10 */
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    /* t0/t1 = 17 * (s0 +/- s2) + rounding */ \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    /* t2 = 22 * s1 + 10 * s3, t3 = 22 * s3 - 10 * s1 */ \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

#define SHIFT_HOR4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
} while (0)

#define SHIFT_VERT4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
} while (0)

/** Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    /* 64 = 4 << 4, since vec_splat_s32 can only splat -16..15 */
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

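    // Load the eight 16-byte rows of the 16-bit coefficient block
    // (assumed 16-byte aligned, as vec_ld requires)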
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // Widen the 16-bit coefficients to 32 bits: low halves of each row in
    // s0..s7, high halves in s8..sF
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    // First transform pass: rounding constant 4, then >> 3
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

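    // Second transform pass, on the transposed data: rounding constant 64,
    // then >> 7 (with the extra +1 on s4..s7 inside SHIFT_VERT8)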
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}

/** Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // Horizontal 8-point pass: transpose first so each row of the 8x4
    // block can be transformed lane-wise across the eight vectors
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // Vertical 4-point pass on the four rows of the block, after
    // transposing back
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

    p0 = vec_lvsl(0, dest);
    p1 = vec_lvsl(stride, dest);
    p  = vec_splat_u8(-1);
    perm0 = vec_mergeh(p, p0);
    perm1 = vec_mergeh(p, p1);

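    // ADD reconstructs one 8-pixel row: load the (possibly unaligned)
    // destination pixels, zero-extend them to 16 bits with the permutation
    // built above (the 0xFF selector bytes fetch zeros from the second
    // vec_perm operand), add the residual with saturation, pack back to
    // unsigned bytes and store the row as two 32-bit words.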
#define ADD(dest, src, perm) \
do { \
    /* *(uint64_t *) &tmp = *(uint64_t *) dest; */ \
    tmp  = vec_ld(0, dest); \
    tmp2 = (vector signed short) vec_perm(tmp, vec_splat_u8(0), perm); \
    tmp3 = vec_adds(tmp2, src); \
    tmp  = vec_packsu(tmp3, tmp3); \
    vec_ste((vector unsigned int) tmp, 0, (unsigned int *) dest); \
    vec_ste((vector unsigned int) tmp, 4, (unsigned int *) dest); \
} while (0)

    ADD(dest, src0, perm0); dest += stride;
    ADD(dest, src1, perm1); dest += stride;
    ADD(dest, src2, perm0); dest += stride;
    ADD(dest, src3, perm1);
}

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

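// Instantiate the H.264 chroma MC template twice to get the VC-1
// no-rounding variants: once with a plain store for the put_ version,
// once with vec_avg for the avg_ version.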
#define OP_U8_ALTIVEC                        PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC                        AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

void ff_vc1dsp_init_altivec(VC1DSPContext *dsp)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
}
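
/* Usage sketch (illustrative, assuming the generic ff_vc1dsp_init() entry
 * point in vc1dsp.c): the decoder fills a VC1DSPContext once at init time
 * and then calls the transforms through it, so the AltiVec versions above
 * are picked up transparently wherever they were installed:
 *
 *     VC1DSPContext dsp;
 *     ff_vc1dsp_init(&dsp);                        // selects C or AltiVec versions
 *     dsp.vc1_inv_trans_8x8(block);                // in place on 16-bit coefficients
 *     dsp.vc1_inv_trans_8x4(dest, stride, block);  // adds the result to dest
 */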