vector signed char vpix1;
vector signed short vpix2, vdiff, vpix1l, vpix1h;
union {
    vector signed int vscore;
    int32_t score[4];   /* aliases vscore so the summed result can be read back as scalars */
} u;
u.vscore = vec_splat_s32(0);
/* Unaligned load: fetch the two aligned quadwords that straddle b and
 * permute the wanted 16 bytes into place. */
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))
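This is the standard AltiVec unaligned-load idiom: vec_ld() ignores the low four address bits, so the two aligned quadwords straddling b are fetched and vec_perm() selects the 16 wanted bytes using the pattern from vec_lvsl(). Sketched for b = base + 5 with base 16-byte aligned (illustrative values):

/* vec_ld(0, b)   -> bytes base[0..15]  (aligned load; low address bits dropped)
 * vec_ld(15, b)  -> bytes base[16..31] (the next aligned quadword)
 * vec_lvsl(0, b) -> {5, 6, ..., 20}    (byte-selection pattern)
 * vec_perm(...)  -> bytes base[5..20], i.e. the 16 bytes starting at b */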
vpix1h   = vec_unpackh(vpix1);       /* sign-extend the high eight pix1 bytes */
vdiff    = vec_sub(vpix1h, vpix2);
vpix1l   = vec_unpackl(vpix1);
vpix2    = vec_unaligned_load(pix2); /* next eight pix2 samples */
u.vscore = vec_msum(vdiff, vdiff, u.vscore);
vdiff    = vec_sub(vpix1l, vpix2);
u.vscore = vec_msum(vdiff, vdiff, u.vscore);
u.vscore = vec_sums(u.vscore, vec_splat_s32(0)); /* total of all four lanes -> element 3 */
size %= 16; /* pix1/pix2 already point past the vector-processed samples */
for (i = 0; i < size; i++) {
    u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
}
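For reference, the scalar computation this routine vectorizes, modeled on FFmpeg's C fallback (the _c name is illustrative):

#include <stdint.h>

static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
                               int size)
{
    int score = 0, i;
    for (i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}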
register vec_s32 res = vec_splat_s32(0), t;
shifts = zero_u32v;
/* Replicate the scalar 'shift' into every lane, bit by bit:
 * vec_splat_u32() only accepts a 5-bit immediate (-16..15),
 * so the 16s bit has to be composed as 8 << 1. */
if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));
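A worked example of the decomposition, assuming shift = 19 (binary 10011): the 0x10 branch adds vec_sl(8, 1) = 16 to every lane, the 0x02 branch adds 2, and the 0x01 branch adds 1, so each lane of shifts ends up holding 19.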
for (i = 0; i < order; i += 8) {
    pv   = (vec_s16 *) v1;
    vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));  /* unaligned load of v1   */
    t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v); /* 16x16->32 dot products */
    t    = vec_sr(t, shifts);                        /* >> shift in every lane */
    res  = vec_sums(t, res);                         /* accumulate into lane 3 */
    v1 += 8;
    v2 += 8;
}
res = vec_splat(res, 3); /* vec_sums left the total in lane 3; broadcast it */
vec_ste(res, 0, &ires);  /* store a single element to the aligned int32_t  */
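Taken together, the fragment is the vector form of this scalar product-with-shift, modeled on FFmpeg's C reference and shown for orientation:

#include <stdint.h>

static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
                                     int order, int shift)
{
    int res = 0;
    while (order--)
        res += (*v1++ * *v2++) >> shift;
    return res;
}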
register vec_s16 muls = {mul, mul, mul, mul, mul, mul, mul, mul};
register vec_s16 i2 = pv2[0], i3 = pv3[0]; /* first aligned quadwords of v2/v3 */
register vec_u8 align = vec_lvsl(0, v2);   /* v3 must share v2's alignment: the
                                              same permute vector serves both */
/* unaligned load of 16 v2 samples spanning pv2[0..2] */
t0 = vec_perm(i2, pv2[1], align);
i2 = pv2[2];
t1 = vec_perm(pv2[1], i2, align);
i0 = pv1[0];
i1 = pv1[1];
res = vec_msum(t0, i0, res); /* res += v1 . v2 for these 16 samples */
res = vec_msum(t1, i1, res);
/* same unaligned-load pattern for v3 */
t0 = vec_perm(i3, pv3[1], align);
i3 = pv3[2];
t1 = vec_perm(pv3[1], i3, align);
pv1[0] = vec_mladd(t0, muls, i0); /* v1 += mul * v3, written back in place */
pv1[1] = vec_mladd(t1, muls, i1);
res = vec_splat(vec_sums(res, zero_s32v), 3);
vec_ste(res, 0, &ires);
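And the scalar operation being vectorized here: a dot product of v1 and v2 computed while v1 is updated in place with mul * v3, again modeled on FFmpeg's C reference:

#include <stdint.h>

static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3, int order,
                                              int mul)
{
    int res = 0;
    while (order--) {
        res   += *v1 * *v2++;  /* accumulate the dot product          */
        *v1++ += mul * *v3++;  /* in-place update: v1[i] += mul*v3[i] */
    }
    return res;
}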