DECLARE_ALIGNED(16, static const uint16_t, pw_7f)[8] = {0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F};
DECLARE_ALIGNED(16, static const uint16_t, pw_ff)[8] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
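/* pw_7f holds the 127 bias used by the strength curve in the filter loops
 * below; pw_ff is the even-byte mask used by the SSE2 blur to de-interleave
 * horizontally adjacent pixels. */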
#if HAVE_MMXEXT_INLINE
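/*
 * Per-pixel math, in <<7 fixed point (same as ff_gradfun_filter_line_c):
 *     pix    = src[x] << 7;
 *     delta  = dc[x / 2] - pix;
 *     m      = FFMAX(0, 127 - (abs(delta) * thresh >> 16));
 *     pix   += (m * m * delta >> 14) + dithers[x & 7];
 *     dst[x] = av_clip_uint8(pix >> 7);
 * This version processes four pixels per iteration.
 */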
static void gradfun_filter_line_mmxext(uint8_t *dst, uint8_t *src, uint16_t *dc,
                                       int width, int thresh,
                                       const uint16_t *dithers)
{
    intptr_t x;
    if (width & 3) {
        /* the MMX loop handles 4 pixels per iteration; do the tail in C */
        x = width & ~3;
        ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
        width = x;
    }
    x = -width; /* negative offset counts up to zero across the line */
    __asm__ volatile(
        "movd          %4, %%mm5 \n"
46 "pxor %%mm7, %%mm7 \n"
47 "pshufw $0, %%mm5, %%mm5 \n"
51 "movd (%2,%0), %%mm0 \n"
52 "movd (%3,%0), %%mm1 \n"
53 "punpcklbw %%mm7, %%mm0 \n"
54 "punpcklwd %%mm1, %%mm1 \n"
56 "pxor %%mm2, %%mm2 \n"
57 "psubw %%mm0, %%mm1 \n"
58 "psubw %%mm1, %%mm2 \n"
59 "pmaxsw %%mm1, %%mm2 \n"
60 "pmulhuw %%mm5, %%mm2 \n"
61 "psubw %%mm6, %%mm2 \n"
62 "pminsw %%mm7, %%mm2 \n"
63 "pmullw %%mm2, %%mm2 \n"
64 "paddw %%mm4, %%mm0 \n"
65 "pmulhw %%mm2, %%mm1 \n"
67 "paddw %%mm1, %%mm0 \n"
69 "packuswb %%mm0, %%mm0 \n"
70 "movd %%mm0, (%1,%0) \n"
        :"+r"(x)
        :"r"(dst+width), "r"(src+width), "r"(dc+width/2),
         "rm"(thresh), "m"(*dithers), "m"(*pw_7f)
        :"memory"
    );
}
#endif

#if HAVE_SSSE3_INLINE
static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc,
                                      int width, int thresh,
                                      const uint16_t *dithers)
{
    intptr_t x;
    if (width & 7) {
        /* the SSE loop handles 8 pixels per iteration; do the tail in C */
        x = width & ~7;
        ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
        width = x;
    }
    x = -width;
    __asm__ volatile(
        "movd           %4, %%xmm5 \n"
95 "pxor %%xmm7, %%xmm7 \n"
96 "pshuflw $0,%%xmm5, %%xmm5 \n"
97 "movdqa %6, %%xmm6 \n"
98 "punpcklqdq %%xmm5, %%xmm5 \n"
99 "movdqa %5, %%xmm4 \n"
101 "movq (%2,%0), %%xmm0 \n"
102 "movq (%3,%0), %%xmm1 \n"
103 "punpcklbw %%xmm7, %%xmm0 \n"
104 "punpcklwd %%xmm1, %%xmm1 \n"
105 "psllw $7, %%xmm0 \n"
106 "psubw %%xmm0, %%xmm1 \n"
107 "pabsw %%xmm1, %%xmm2 \n"
108 "pmulhuw %%xmm5, %%xmm2 \n"
109 "psubw %%xmm6, %%xmm2 \n"
110 "pminsw %%xmm7, %%xmm2 \n"
111 "pmullw %%xmm2, %%xmm2 \n"
112 "psllw $1, %%xmm2 \n"
113 "paddw %%xmm4, %%xmm0 \n"
114 "pmulhrsw %%xmm2, %%xmm1 \n"
115 "paddw %%xmm1, %%xmm0 \n"
116 "psraw $7, %%xmm0 \n"
117 "packuswb %%xmm0, %%xmm0 \n"
118 "movq %%xmm0, (%1,%0) \n"
        "add            $8, %0 \n"
        "jl 1b \n"
        :"+r"(x)
        :"r"(dst+width), "r"(src+width), "r"(dc+width/2),
         "rm"(thresh), "m"(*dithers), "m"(*pw_7f)
        :"memory"
    );
}
#endif

#if HAVE_SSE2_INLINE
static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, uint16_t *buf1,
                                   uint8_t *src, int src_linesize, int width)
{
#define BLURV(load)\
    intptr_t x = -2*width;\
135 "movdqa %6, %%xmm7 \n"\
137 load" (%4,%0), %%xmm0 \n"\
138 load" (%5,%0), %%xmm1 \n"\
139 "movdqa %%xmm0, %%xmm2 \n"\
140 "movdqa %%xmm1, %%xmm3 \n"\
141 "psrlw $8, %%xmm0 \n"\
142 "psrlw $8, %%xmm1 \n"\
143 "pand %%xmm7, %%xmm2 \n"\
144 "pand %%xmm7, %%xmm3 \n"\
145 "paddw %%xmm1, %%xmm0 \n"\
146 "paddw %%xmm3, %%xmm2 \n"\
147 "paddw %%xmm2, %%xmm0 \n"\
148 "paddw (%2,%0), %%xmm0 \n"\
149 "movdqa (%1,%0), %%xmm1 \n"\
150 "movdqa %%xmm0, (%1,%0) \n"\
151 "psubw %%xmm1, %%xmm0 \n"\
152 "movdqa %%xmm0, (%3,%0) \n"\
160 "r"(src+width*2+src_linesize),\
    if (((intptr_t) src | src_linesize) & 15) {
        BLURV("movdqu");
    } else {
        BLURV("movdqa");
    }
}
#endif /* HAVE_SSE2_INLINE */

#endif /* HAVE_INLINE_ASM */

av_cold void ff_gradfun_init_x86(GradFunContext *gf)
{
    int cpu_flags = av_get_cpu_flags();
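
    /* later registrations override earlier ones, so the fastest supported
     * variant ends up in the context */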
#if HAVE_MMXEXT_INLINE
    if (cpu_flags & AV_CPU_FLAG_MMXEXT)
        gf->filter_line = gradfun_filter_line_mmxext;
#endif
#if HAVE_SSSE3_INLINE