output.c
1 /*
2  * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <assert.h>
22 #include <math.h>
23 #include <stdint.h>
24 #include <stdio.h>
25 #include <string.h>
26 
27 #include "libavutil/attributes.h"
28 #include "libavutil/avutil.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "rgb2rgb.h"
36 #include "swscale.h"
37 #include "swscale_internal.h"
38 
39 DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={
40 { 1, 3, 1, 3, 1, 3, 1, 3, },
41 { 2, 0, 2, 0, 2, 0, 2, 0, },
42 };
43 
44 DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8]={
45 { 6, 2, 6, 2, 6, 2, 6, 2, },
46 { 0, 4, 0, 4, 0, 4, 0, 4, },
47 };
48 
49 DECLARE_ALIGNED(8, const uint8_t, dither_4x4_16)[4][8]={
50 { 8, 4, 11, 7, 8, 4, 11, 7, },
51 { 2, 14, 1, 13, 2, 14, 1, 13, },
52 { 10, 6, 9, 5, 10, 6, 9, 5, },
53 { 0, 12, 3, 15, 0, 12, 3, 15, },
54 };
55 
56 DECLARE_ALIGNED(8, const uint8_t, dither_8x8_32)[8][8]={
57 { 17, 9, 23, 15, 16, 8, 22, 14, },
58 { 5, 29, 3, 27, 4, 28, 2, 26, },
59 { 21, 13, 19, 11, 20, 12, 18, 10, },
60 { 0, 24, 6, 30, 1, 25, 7, 31, },
61 { 16, 8, 22, 14, 17, 9, 23, 15, },
62 { 4, 28, 2, 26, 5, 29, 3, 27, },
63 { 20, 12, 18, 10, 21, 13, 19, 11, },
64 { 1, 25, 7, 31, 0, 24, 6, 30, },
65 };
66 
67 DECLARE_ALIGNED(8, const uint8_t, dither_8x8_73)[8][8]={
68 { 0, 55, 14, 68, 3, 58, 17, 72, },
69 { 37, 18, 50, 32, 40, 22, 54, 35, },
70 { 9, 64, 5, 59, 13, 67, 8, 63, },
71 { 46, 27, 41, 23, 49, 31, 44, 26, },
72 { 2, 57, 16, 71, 1, 56, 15, 70, },
73 { 39, 21, 52, 34, 38, 19, 51, 33, },
74 { 11, 66, 7, 62, 10, 65, 6, 60, },
75 { 48, 30, 43, 25, 47, 29, 42, 24, },
76 };
77 
78 #if 1
79 DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
80 {117, 62, 158, 103, 113, 58, 155, 100, },
81 { 34, 199, 21, 186, 31, 196, 17, 182, },
82 {144, 89, 131, 76, 141, 86, 127, 72, },
83 { 0, 165, 41, 206, 10, 175, 52, 217, },
84 {110, 55, 151, 96, 120, 65, 162, 107, },
85 { 28, 193, 14, 179, 38, 203, 24, 189, },
86 {138, 83, 124, 69, 148, 93, 134, 79, },
87 { 7, 172, 48, 213, 3, 168, 45, 210, },
88 };
89 #elif 1
90 // tries to correct a gamma of 1.5
91 DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
92 { 0, 143, 18, 200, 2, 156, 25, 215, },
93 { 78, 28, 125, 64, 89, 36, 138, 74, },
94 { 10, 180, 3, 161, 16, 195, 8, 175, },
95 {109, 51, 93, 38, 121, 60, 105, 47, },
96 { 1, 152, 23, 210, 0, 147, 20, 205, },
97 { 85, 33, 134, 71, 81, 30, 130, 67, },
98 { 14, 190, 6, 171, 12, 185, 5, 166, },
99 {117, 57, 101, 44, 113, 54, 97, 41, },
100 };
101 #elif 1
102 // tries to correct a gamma of 2.0
103 DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
104 { 0, 124, 8, 193, 0, 140, 12, 213, },
105 { 55, 14, 104, 42, 66, 19, 119, 52, },
106 { 3, 168, 1, 145, 6, 187, 3, 162, },
107 { 86, 31, 70, 21, 99, 39, 82, 28, },
108 { 0, 134, 11, 206, 0, 129, 9, 200, },
109 { 62, 17, 114, 48, 58, 16, 109, 45, },
110 { 5, 181, 2, 157, 4, 175, 1, 151, },
111 { 95, 36, 78, 26, 90, 34, 74, 24, },
112 };
113 #else
114 // tries to correct a gamma of 2.5
115 DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
116 { 0, 107, 3, 187, 0, 125, 6, 212, },
117 { 39, 7, 86, 28, 49, 11, 102, 36, },
118 { 1, 158, 0, 131, 3, 180, 1, 151, },
119 { 68, 19, 52, 12, 81, 25, 64, 17, },
120 { 0, 119, 5, 203, 0, 113, 4, 195, },
121 { 45, 9, 96, 33, 42, 8, 91, 30, },
122 { 2, 172, 1, 144, 2, 165, 0, 137, },
123 { 77, 23, 60, 15, 72, 21, 56, 14, },
124 };
125 #endif
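/* The tables above are ordered-dither thresholds: an offset that depends only
 * on the pixel position (x & 7, y & 7) is added to a value just before it is
 * truncated to fewer bits, trading banding for a fixed noise pattern.  A
 * minimal sketch of the idea (illustrative only; the hypothetical helper below
 * is not part of this file -- the real writers fold the offsets into LUT
 * indices or thresholds instead):
 */
#if 0
static uint8_t quantize_3bit_dithered(uint8_t v, int x, int y)
{
    /* dither_8x8_32 spans 0..31, roughly one 3-bit quantization step (32) */
    int t = v + dither_8x8_32[y & 7][x & 7];
    if (t > 255)
        t = 255;
    return t >> 5; /* keep the top 3 bits */
}
#endif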
126 
127 #define output_pixel(pos, val, bias, signedness) \
128  if (big_endian) { \
129  AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
130  } else { \
131  AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
132  }
133 
134 static av_always_inline void
135 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
136  int big_endian, int output_bits)
137 {
138  int i;
139  int shift = 19 - output_bits;
140 
141  for (i = 0; i < dstW; i++) {
142  int val = src[i] + (1 << (shift - 1));
143  output_pixel(&dest[i], val, 0, uint);
144  }
145 }
146 
147 static av_always_inline void
148 yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
149  const int32_t **src, uint16_t *dest, int dstW,
150  int big_endian, int output_bits)
151 {
152  int i;
153  int shift = 15 + 16 - output_bits;
154 
155  for (i = 0; i < dstW; i++) {
156  int val = 1 << (30-output_bits);
157  int j;
158 
159  /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
160  * filters (or anything with negative coeffs), the range can be slightly
161  * wider in both directions. To account for this overflow, we subtract
162  * a constant so it always fits in the signed range (assuming a
163  * reasonable filterSize), and re-add that at the end. */
164  val -= 0x40000000;
165  for (j = 0; j < filterSize; j++)
166  val += src[j][i] * filter[j];
167 
168  output_pixel(&dest[i], val, 0x8000, int);
169  }
170 }
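/* Worked example of the bias above (restating the comment in the loop): for
 * 16-bit output, shift == 15 and output_pixel() adds 0x8000 after clipping to
 * signed 16 bits.  If S is the "true" unsigned accumulator (0..0x7FFFFFFF),
 *
 *     0x8000 + av_clip_int16((S - 0x40000000) >> 15)
 *   = 0x8000 + ((S >> 15) - 0x8000)        for in-range S
 *   = S >> 15
 *
 * so the subtraction only moves the intermediate into signed int range and
 * the 0x8000 bias undoes it after the shift. */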
171 
172 #undef output_pixel
173 
174 #define output_pixel(pos, val) \
175  if (big_endian) { \
176  AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
177  } else { \
178  AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
179  }
180 
181 static av_always_inline void
182 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
183  int big_endian, int output_bits)
184 {
185  int i;
186  int shift = 15 - output_bits;
187 
188  for (i = 0; i < dstW; i++) {
189  int val = src[i] + (1 << (shift - 1));
190  output_pixel(&dest[i], val);
191  }
192 }
193 
194 static av_always_inline void
195 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
196  const int16_t **src, uint16_t *dest, int dstW,
197  int big_endian, int output_bits)
198 {
199  int i;
200  int shift = 11 + 16 - output_bits;
201 
202  for (i = 0; i < dstW; i++) {
203  int val = 1 << (26-output_bits);
204  int j;
205 
206  for (j = 0; j < filterSize; j++)
207  val += src[j][i] * filter[j];
208 
209  output_pixel(&dest[i], val);
210  }
211 }
212 
213 #undef output_pixel
214 
215 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
216 static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
217  uint8_t *dest, int dstW, \
218  const uint8_t *dither, int offset)\
219 { \
220  yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
221  (uint16_t *) dest, dstW, is_be, bits); \
222 }\
223 static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
224  const int16_t **src, uint8_t *dest, int dstW, \
225  const uint8_t *dither, int offset)\
226 { \
227  yuv2planeX_## template_size ## _c_template(filter, \
228  filterSize, (const typeX_t **) src, \
229  (uint16_t *) dest, dstW, is_be, bits); \
230 }
231 yuv2NBPS( 9, BE, 1, 10, int16_t)
232 yuv2NBPS( 9, LE, 0, 10, int16_t)
233 yuv2NBPS(10, BE, 1, 10, int16_t)
234 yuv2NBPS(10, LE, 0, 10, int16_t)
235 yuv2NBPS(16, BE, 1, 16, int32_t)
236 yuv2NBPS(16, LE, 0, 16, int32_t)
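/* For reference, yuv2NBPS(9, LE, 0, 10, int16_t) above expands to the pair of
 * thin wrappers sketched below (kept under #if 0 since the real definitions
 * are already produced by the macro):
 */
#if 0
static void yuv2plane1_9LE_c(const int16_t *src, uint8_t *dest, int dstW,
                             const uint8_t *dither, int offset)
{
    yuv2plane1_10_c_template((const int16_t *) src,
                             (uint16_t *) dest, dstW, 0, 9);
}

static void yuv2planeX_9LE_c(const int16_t *filter, int filterSize,
                             const int16_t **src, uint8_t *dest, int dstW,
                             const uint8_t *dither, int offset)
{
    yuv2planeX_10_c_template(filter, filterSize, (const int16_t **) src,
                             (uint16_t *) dest, dstW, 0, 9);
}
#endif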
237 
238 static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
239  const int16_t **src, uint8_t *dest, int dstW,
240  const uint8_t *dither, int offset)
241 {
242  int i;
243  for (i=0; i<dstW; i++) {
244  int val = dither[(i + offset) & 7] << 12;
245  int j;
246  for (j=0; j<filterSize; j++)
247  val += src[j][i] * filter[j];
248 
249  dest[i]= av_clip_uint8(val>>19);
250  }
251 }
252 
253 static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
254  const uint8_t *dither, int offset)
255 {
256  int i;
257  for (i=0; i<dstW; i++) {
258  int val = (src[i] + dither[(i + offset) & 7]) >> 7;
259  dest[i]= av_clip_uint8(val);
260  }
261 }
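/* Fixed-point bookkeeping for the two 8-bit writers above (a sketch of the
 * usual swscale layout; the scale factors themselves are set up elsewhere):
 * horizontally scaled samples carry 7 fractional bits and the vertical filter
 * coefficients carry 12 (a unity filter sums to 4096 = 1 << 12), so a
 * filtered sum carries 7 + 12 = 19 fractional bits -- hence the
 * "dither << 12" seed and ">> 19" in yuv2planeX_8_c, and the plain ">> 7"
 * in yuv2plane1_8_c where no coefficients are applied. */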
262 
263 static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize,
264  const int16_t **chrUSrc, const int16_t **chrVSrc,
265  uint8_t *dest, int chrDstW)
266 {
267  enum AVPixelFormat dstFormat = c->dstFormat;
268  const uint8_t *chrDither = c->chrDither8;
269  int i;
270 
271  if (dstFormat == AV_PIX_FMT_NV12)
272  for (i=0; i<chrDstW; i++) {
273  int u = chrDither[i & 7] << 12;
274  int v = chrDither[(i + 3) & 7] << 12;
275  int j;
276  for (j=0; j<chrFilterSize; j++) {
277  u += chrUSrc[j][i] * chrFilter[j];
278  v += chrVSrc[j][i] * chrFilter[j];
279  }
280 
281  dest[2*i]= av_clip_uint8(u>>19);
282  dest[2*i+1]= av_clip_uint8(v>>19);
283  }
284  else
285  for (i=0; i<chrDstW; i++) {
286  int u = chrDither[i & 7] << 12;
287  int v = chrDither[(i + 3) & 7] << 12;
288  int j;
289  for (j=0; j<chrFilterSize; j++) {
290  u += chrUSrc[j][i] * chrFilter[j];
291  v += chrVSrc[j][i] * chrFilter[j];
292  }
293 
294  dest[2*i]= av_clip_uint8(v>>19);
295  dest[2*i+1]= av_clip_uint8(u>>19);
296  }
297 }
298 
299 #define accumulate_bit(acc, val) \
300  acc <<= 1; \
301  acc |= (val) >= (128 + 110)
302 #define output_pixel(pos, acc) \
303  if (target == AV_PIX_FMT_MONOBLACK) { \
304  pos = acc; \
305  } else { \
306  pos = ~acc; \
307  }
308 
309 static av_always_inline void
310 yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
311  const int16_t **lumSrc, int lumFilterSize,
312  const int16_t *chrFilter, const int16_t **chrUSrc,
313  const int16_t **chrVSrc, int chrFilterSize,
314  const int16_t **alpSrc, uint8_t *dest, int dstW,
315  int y, enum AVPixelFormat target)
316 {
317  const uint8_t * const d128=dither_8x8_220[y&7];
318  int i;
319  unsigned acc = 0;
320 
321  for (i = 0; i < dstW; i += 2) {
322  int j;
323  int Y1 = 1 << 18;
324  int Y2 = 1 << 18;
325 
326  for (j = 0; j < lumFilterSize; j++) {
327  Y1 += lumSrc[j][i] * lumFilter[j];
328  Y2 += lumSrc[j][i+1] * lumFilter[j];
329  }
330  Y1 >>= 19;
331  Y2 >>= 19;
332  if ((Y1 | Y2) & 0x100) {
333  Y1 = av_clip_uint8(Y1);
334  Y2 = av_clip_uint8(Y2);
335  }
336  accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
337  accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
338  if ((i & 7) == 6) {
339  output_pixel(*dest++, acc);
340  }
341  }
342 
343  if (i & 6) {
344  output_pixel(*dest, acc);
345  }
346 }
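/* In the monochrome writers, accumulate_bit() packs one pixel per bit, MSB
 * first, so a byte is flushed every 8 pixels ((i & 7) == 6 triggers on the
 * fourth pixel pair).  The (128 + 110) threshold together with the 0..217
 * range of dither_8x8_220 implements ordered dithering down to 1 bit per
 * pixel; output_pixel() simply inverts the byte for MONOWHITE. */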
347 
348 static av_always_inline void
349 yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
350  const int16_t *ubuf[2], const int16_t *vbuf[2],
351  const int16_t *abuf[2], uint8_t *dest, int dstW,
352  int yalpha, int uvalpha, int y,
353  enum AVPixelFormat target)
354 {
355  const int16_t *buf0 = buf[0], *buf1 = buf[1];
356  const uint8_t * const d128 = dither_8x8_220[y & 7];
357  int yalpha1 = 4096 - yalpha;
358  int i;
359 
360  for (i = 0; i < dstW; i += 8) {
361  int Y, acc = 0;
362 
363  Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
364  accumulate_bit(acc, Y + d128[0]);
365  Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
366  accumulate_bit(acc, Y + d128[1]);
367  Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
368  accumulate_bit(acc, Y + d128[2]);
369  Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
370  accumulate_bit(acc, Y + d128[3]);
371  Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
372  accumulate_bit(acc, Y + d128[4]);
373  Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
374  accumulate_bit(acc, Y + d128[5]);
375  Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
376  accumulate_bit(acc, Y + d128[6]);
377  Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
378  accumulate_bit(acc, Y + d128[7]);
379 
380  output_pixel(*dest++, acc);
381  }
382 }
383 
384 static av_always_inline void
385 yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
386  const int16_t *ubuf[2], const int16_t *vbuf[2],
387  const int16_t *abuf0, uint8_t *dest, int dstW,
388  int uvalpha, int y, enum AVPixelFormat target)
389 {
390  const uint8_t * const d128 = dither_8x8_220[y & 7];
391  int i;
392 
393  for (i = 0; i < dstW; i += 8) {
394  int acc = 0;
395 
396  accumulate_bit(acc, (buf0[i + 0] >> 7) + d128[0]);
397  accumulate_bit(acc, (buf0[i + 1] >> 7) + d128[1]);
398  accumulate_bit(acc, (buf0[i + 2] >> 7) + d128[2]);
399  accumulate_bit(acc, (buf0[i + 3] >> 7) + d128[3]);
400  accumulate_bit(acc, (buf0[i + 4] >> 7) + d128[4]);
401  accumulate_bit(acc, (buf0[i + 5] >> 7) + d128[5]);
402  accumulate_bit(acc, (buf0[i + 6] >> 7) + d128[6]);
403  accumulate_bit(acc, (buf0[i + 7] >> 7) + d128[7]);
404 
405  output_pixel(*dest++, acc);
406  }
407 }
408 
409 #undef output_pixel
410 #undef accumulate_bit
411 
412 #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
413 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
414  const int16_t **lumSrc, int lumFilterSize, \
415  const int16_t *chrFilter, const int16_t **chrUSrc, \
416  const int16_t **chrVSrc, int chrFilterSize, \
417  const int16_t **alpSrc, uint8_t *dest, int dstW, \
418  int y) \
419 { \
420  name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
421  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
422  alpSrc, dest, dstW, y, fmt); \
423 } \
424  \
425 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
426  const int16_t *ubuf[2], const int16_t *vbuf[2], \
427  const int16_t *abuf[2], uint8_t *dest, int dstW, \
428  int yalpha, int uvalpha, int y) \
429 { \
430  name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
431  dest, dstW, yalpha, uvalpha, y, fmt); \
432 } \
433  \
434 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
435  const int16_t *ubuf[2], const int16_t *vbuf[2], \
436  const int16_t *abuf0, uint8_t *dest, int dstW, \
437  int uvalpha, int y) \
438 { \
439  name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
440  abuf0, dest, dstW, uvalpha, \
441  y, fmt); \
442 }
443 
444 YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
445 YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
446 
447 #define output_pixels(pos, Y1, U, Y2, V) \
448  if (target == AV_PIX_FMT_YUYV422) { \
449  dest[pos + 0] = Y1; \
450  dest[pos + 1] = U; \
451  dest[pos + 2] = Y2; \
452  dest[pos + 3] = V; \
453  } else { \
454  dest[pos + 0] = U; \
455  dest[pos + 1] = Y1; \
456  dest[pos + 2] = V; \
457  dest[pos + 3] = Y2; \
458  }
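/* Byte layout written by output_pixels() for one pixel pair:
 *
 *   AV_PIX_FMT_YUYV422:  Y1 U Y2 V
 *   AV_PIX_FMT_UYVY422:  U Y1 V Y2
 *
 * i.e. the same 4:2:2 data with the luma and chroma bytes swapped. */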
459 
460 static av_always_inline void
461 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
462  const int16_t **lumSrc, int lumFilterSize,
463  const int16_t *chrFilter, const int16_t **chrUSrc,
464  const int16_t **chrVSrc, int chrFilterSize,
465  const int16_t **alpSrc, uint8_t *dest, int dstW,
466  int y, enum AVPixelFormat target)
467 {
468  int i;
469 
470  for (i = 0; i < ((dstW + 1) >> 1); i++) {
471  int j;
472  int Y1 = 1 << 18;
473  int Y2 = 1 << 18;
474  int U = 1 << 18;
475  int V = 1 << 18;
476 
477  for (j = 0; j < lumFilterSize; j++) {
478  Y1 += lumSrc[j][i * 2] * lumFilter[j];
479  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
480  }
481  for (j = 0; j < chrFilterSize; j++) {
482  U += chrUSrc[j][i] * chrFilter[j];
483  V += chrVSrc[j][i] * chrFilter[j];
484  }
485  Y1 >>= 19;
486  Y2 >>= 19;
487  U >>= 19;
488  V >>= 19;
489  if ((Y1 | Y2 | U | V) & 0x100) {
490  Y1 = av_clip_uint8(Y1);
491  Y2 = av_clip_uint8(Y2);
492  U = av_clip_uint8(U);
493  V = av_clip_uint8(V);
494  }
495  output_pixels(4*i, Y1, U, Y2, V);
496  }
497 }
498 
499 static av_always_inline void
500 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
501  const int16_t *ubuf[2], const int16_t *vbuf[2],
502  const int16_t *abuf[2], uint8_t *dest, int dstW,
503  int yalpha, int uvalpha, int y,
504  enum AVPixelFormat target)
505 {
506  const int16_t *buf0 = buf[0], *buf1 = buf[1],
507  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
508  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
509  int yalpha1 = 4096 - yalpha;
510  int uvalpha1 = 4096 - uvalpha;
511  int i;
512 
513  for (i = 0; i < ((dstW + 1) >> 1); i++) {
514  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
515  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
516  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
517  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
518 
519  Y1 = av_clip_uint8(Y1);
520  Y2 = av_clip_uint8(Y2);
521  U = av_clip_uint8(U);
522  V = av_clip_uint8(V);
523 
524  output_pixels(i * 4, Y1, U, Y2, V);
525  }
526 }
527 
528 static av_always_inline void
529 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
530  const int16_t *ubuf[2], const int16_t *vbuf[2],
531  const int16_t *abuf0, uint8_t *dest, int dstW,
532  int uvalpha, int y, enum AVPixelFormat target)
533 {
534  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
535  int i;
536 
537  if (uvalpha < 2048) {
538  for (i = 0; i < ((dstW + 1) >> 1); i++) {
539  int Y1 = buf0[i * 2] >> 7;
540  int Y2 = buf0[i * 2 + 1] >> 7;
541  int U = ubuf0[i] >> 7;
542  int V = vbuf0[i] >> 7;
543 
544  Y1 = av_clip_uint8(Y1);
545  Y2 = av_clip_uint8(Y2);
546  U = av_clip_uint8(U);
547  V = av_clip_uint8(V);
548 
549  output_pixels(i * 4, Y1, U, Y2, V);
550  }
551  } else {
552  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
553  for (i = 0; i < ((dstW + 1) >> 1); i++) {
554  int Y1 = buf0[i * 2] >> 7;
555  int Y2 = buf0[i * 2 + 1] >> 7;
556  int U = (ubuf0[i] + ubuf1[i]) >> 8;
557  int V = (vbuf0[i] + vbuf1[i]) >> 8;
558 
559  Y1 = av_clip_uint8(Y1);
560  Y2 = av_clip_uint8(Y2);
561  U = av_clip_uint8(U);
562  V = av_clip_uint8(V);
563 
564  output_pixels(i * 4, Y1, U, Y2, V);
565  }
566  }
567 }
568 
569 #undef output_pixels
570 
571 YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
572 YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
573 
574 #define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? R : B)
575 #define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? B : R)
576 #define output_pixel(pos, val) \
577  if (isBE(target)) { \
578  AV_WB16(pos, val); \
579  } else { \
580  AV_WL16(pos, val); \
581  }
582 
583 static av_always_inline void
584 yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter,
585  const int32_t **lumSrc, int lumFilterSize,
586  const int16_t *chrFilter, const int32_t **chrUSrc,
587  const int32_t **chrVSrc, int chrFilterSize,
588  const int32_t **alpSrc, uint16_t *dest, int dstW,
589  int y, enum AVPixelFormat target)
590 {
591  int i;
592 
593  for (i = 0; i < ((dstW + 1) >> 1); i++) {
594  int j;
595  int Y1 = -0x40000000;
596  int Y2 = -0x40000000;
597  int U = -128 << 23; // 19
598  int V = -128 << 23;
599  int R, G, B;
600 
601  for (j = 0; j < lumFilterSize; j++) {
602  Y1 += lumSrc[j][i * 2] * lumFilter[j];
603  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
604  }
605  for (j = 0; j < chrFilterSize; j++) {
606  U += chrUSrc[j][i] * chrFilter[j];
607  V += chrVSrc[j][i] * chrFilter[j];
608  }
609 
610  // 8bit: 12+15=27; 16-bit: 12+19=31
611  Y1 >>= 14; // 10
612  Y1 += 0x10000;
613  Y2 >>= 14;
614  Y2 += 0x10000;
615  U >>= 14;
616  V >>= 14;
617 
618  // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
619  Y1 -= c->yuv2rgb_y_offset;
620  Y2 -= c->yuv2rgb_y_offset;
621  Y1 *= c->yuv2rgb_y_coeff;
622  Y2 *= c->yuv2rgb_y_coeff;
623  Y1 += 1 << 13; // 21
624  Y2 += 1 << 13;
625  // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit
626 
627  R = V * c->yuv2rgb_v2r_coeff;
628  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
629  B = U * c->yuv2rgb_u2b_coeff;
630 
631  // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
632  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
633  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
634  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
635  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
636  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
637  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
638  dest += 6;
639  }
640 }
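/* Precision trail for the 16-bit RGB writer above (restating its inline
 * comments): the int32_t inputs carry 19 significant bits and the filter
 * coefficients 12, so the Y/U/V sums fit in 31 bits; ">> 14" reduces them to
 * 17 bits, the yuv2rgb_* coefficients add roughly 13 more (hence the clip to
 * 30 bits), and the final ">> 14" in the output_pixel() calls leaves the
 * 16-bit component value that is stored in the requested endianness. */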
641 
642 static av_always_inline void
643 yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2],
644  const int32_t *ubuf[2], const int32_t *vbuf[2],
645  const int32_t *abuf[2], uint16_t *dest, int dstW,
646  int yalpha, int uvalpha, int y,
647  enum AVPixelFormat target)
648 {
649  const int32_t *buf0 = buf[0], *buf1 = buf[1],
650  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
651  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
652  int yalpha1 = 4096 - yalpha;
653  int uvalpha1 = 4096 - uvalpha;
654  int i;
655 
656  for (i = 0; i < ((dstW + 1) >> 1); i++) {
657  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
658  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
659  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
660  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14;
661  int R, G, B;
662 
663  Y1 -= c->yuv2rgb_y_offset;
664  Y2 -= c->yuv2rgb_y_offset;
665  Y1 *= c->yuv2rgb_y_coeff;
666  Y2 *= c->yuv2rgb_y_coeff;
667  Y1 += 1 << 13;
668  Y2 += 1 << 13;
669 
670  R = V * c->yuv2rgb_v2r_coeff;
671  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
672  B = U * c->yuv2rgb_u2b_coeff;
673 
674  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
675  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
676  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
677  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
678  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
679  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
680  dest += 6;
681  }
682 }
683 
684 static av_always_inline void
685 yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
686  const int32_t *ubuf[2], const int32_t *vbuf[2],
687  const int32_t *abuf0, uint16_t *dest, int dstW,
688  int uvalpha, int y, enum AVPixelFormat target)
689 {
690  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
691  int i;
692 
693  if (uvalpha < 2048) {
694  for (i = 0; i < ((dstW + 1) >> 1); i++) {
695  int Y1 = (buf0[i * 2] ) >> 2;
696  int Y2 = (buf0[i * 2 + 1]) >> 2;
697  int U = (ubuf0[i] + (-128 << 11)) >> 2;
698  int V = (vbuf0[i] + (-128 << 11)) >> 2;
699  int R, G, B;
700 
701  Y1 -= c->yuv2rgb_y_offset;
702  Y2 -= c->yuv2rgb_y_offset;
703  Y1 *= c->yuv2rgb_y_coeff;
704  Y2 *= c->yuv2rgb_y_coeff;
705  Y1 += 1 << 13;
706  Y2 += 1 << 13;
707 
708  R = V * c->yuv2rgb_v2r_coeff;
709  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
710  B = U * c->yuv2rgb_u2b_coeff;
711 
712  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
713  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
714  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
715  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
716  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
717  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
718  dest += 6;
719  }
720  } else {
721  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
722  for (i = 0; i < ((dstW + 1) >> 1); i++) {
723  int Y1 = (buf0[i * 2] ) >> 2;
724  int Y2 = (buf0[i * 2 + 1]) >> 2;
725  int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
726  int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3;
727  int R, G, B;
728 
729  Y1 -= c->yuv2rgb_y_offset;
730  Y2 -= c->yuv2rgb_y_offset;
731  Y1 *= c->yuv2rgb_y_coeff;
732  Y2 *= c->yuv2rgb_y_coeff;
733  Y1 += 1 << 13;
734  Y2 += 1 << 13;
735 
736  R = V * c->yuv2rgb_v2r_coeff;
737  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
738  B = U * c->yuv2rgb_u2b_coeff;
739 
740  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
741  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
742  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
743  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
744  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
745  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
746  dest += 6;
747  }
748  }
749 }
750 
751 #undef output_pixel
752 #undef r_b
753 #undef b_r
754 
755 #define YUV2PACKED16WRAPPER(name, base, ext, fmt) \
756 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
757  const int16_t **_lumSrc, int lumFilterSize, \
758  const int16_t *chrFilter, const int16_t **_chrUSrc, \
759  const int16_t **_chrVSrc, int chrFilterSize, \
760  const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
761  int y) \
762 { \
763  const int32_t **lumSrc = (const int32_t **) _lumSrc, \
764  **chrUSrc = (const int32_t **) _chrUSrc, \
765  **chrVSrc = (const int32_t **) _chrVSrc, \
766  **alpSrc = (const int32_t **) _alpSrc; \
767  uint16_t *dest = (uint16_t *) _dest; \
768  name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
769  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
770  alpSrc, dest, dstW, y, fmt); \
771 } \
772  \
773 static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
774  const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
775  const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
776  int yalpha, int uvalpha, int y) \
777 { \
778  const int32_t **buf = (const int32_t **) _buf, \
779  **ubuf = (const int32_t **) _ubuf, \
780  **vbuf = (const int32_t **) _vbuf, \
781  **abuf = (const int32_t **) _abuf; \
782  uint16_t *dest = (uint16_t *) _dest; \
783  name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
784  dest, dstW, yalpha, uvalpha, y, fmt); \
785 } \
786  \
787 static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
788  const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
789  const int16_t *_abuf0, uint8_t *_dest, int dstW, \
790  int uvalpha, int y) \
791 { \
792  const int32_t *buf0 = (const int32_t *) _buf0, \
793  **ubuf = (const int32_t **) _ubuf, \
794  **vbuf = (const int32_t **) _vbuf, \
795  *abuf0 = (const int32_t *) _abuf0; \
796  uint16_t *dest = (uint16_t *) _dest; \
797  name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
798  dstW, uvalpha, y, fmt); \
799 }
800 
801 YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, AV_PIX_FMT_RGB48BE)
802 YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, AV_PIX_FMT_RGB48LE)
803 YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, AV_PIX_FMT_BGR48BE)
804 YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, AV_PIX_FMT_BGR48LE)
805 
806 /*
807  * Write out 2 RGB pixels in the target pixel format. This function takes an
808  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
809  * things like endianness conversion and shifting. The caller takes care of
810  * setting the correct offset in these tables from the chroma (U/V) values.
811  * This function then uses the luminance (Y1/Y2) values to write out the
812  * correct RGB values into the destination buffer.
813  */
814 static av_always_inline void
815 yuv2rgb_write(uint8_t *_dest, int i, unsigned Y1, unsigned Y2,
816  unsigned A1, unsigned A2,
817  const void *_r, const void *_g, const void *_b, int y,
818  enum AVPixelFormat target, int hasAlpha)
819 {
820  if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
821  target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
822  uint32_t *dest = (uint32_t *) _dest;
823  const uint32_t *r = (const uint32_t *) _r;
824  const uint32_t *g = (const uint32_t *) _g;
825  const uint32_t *b = (const uint32_t *) _b;
826 
827 #if CONFIG_SMALL
828  int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;
829 
830  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
831  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
832 #else
833  if (hasAlpha) {
834  int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
835 
836  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
837  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
838  } else {
839  dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
840  dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
841  }
842 #endif
843  } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
844  uint8_t *dest = (uint8_t *) _dest;
845  const uint8_t *r = (const uint8_t *) _r;
846  const uint8_t *g = (const uint8_t *) _g;
847  const uint8_t *b = (const uint8_t *) _b;
848 
849 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
850 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
851  dest[i * 6 + 0] = r_b[Y1];
852  dest[i * 6 + 1] = g[Y1];
853  dest[i * 6 + 2] = b_r[Y1];
854  dest[i * 6 + 3] = r_b[Y2];
855  dest[i * 6 + 4] = g[Y2];
856  dest[i * 6 + 5] = b_r[Y2];
857 #undef r_b
858 #undef b_r
859  } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
860  target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
861  target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
862  uint16_t *dest = (uint16_t *) _dest;
863  const uint16_t *r = (const uint16_t *) _r;
864  const uint16_t *g = (const uint16_t *) _g;
865  const uint16_t *b = (const uint16_t *) _b;
866  int dr1, dg1, db1, dr2, dg2, db2;
867 
868  if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
869  dr1 = dither_2x2_8[ y & 1 ][0];
870  dg1 = dither_2x2_4[ y & 1 ][0];
871  db1 = dither_2x2_8[(y & 1) ^ 1][0];
872  dr2 = dither_2x2_8[ y & 1 ][1];
873  dg2 = dither_2x2_4[ y & 1 ][1];
874  db2 = dither_2x2_8[(y & 1) ^ 1][1];
875  } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
876  dr1 = dither_2x2_8[ y & 1 ][0];
877  dg1 = dither_2x2_8[ y & 1 ][1];
878  db1 = dither_2x2_8[(y & 1) ^ 1][0];
879  dr2 = dither_2x2_8[ y & 1 ][1];
880  dg2 = dither_2x2_8[ y & 1 ][0];
881  db2 = dither_2x2_8[(y & 1) ^ 1][1];
882  } else {
883  dr1 = dither_4x4_16[ y & 3 ][0];
884  dg1 = dither_4x4_16[ y & 3 ][1];
885  db1 = dither_4x4_16[(y & 3) ^ 3][0];
886  dr2 = dither_4x4_16[ y & 3 ][1];
887  dg2 = dither_4x4_16[ y & 3 ][0];
888  db2 = dither_4x4_16[(y & 3) ^ 3][1];
889  }
890 
891  dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
892  dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
893  } else /* 8/4-bit */ {
894  uint8_t *dest = (uint8_t *) _dest;
895  const uint8_t *r = (const uint8_t *) _r;
896  const uint8_t *g = (const uint8_t *) _g;
897  const uint8_t *b = (const uint8_t *) _b;
898  int dr1, dg1, db1, dr2, dg2, db2;
899 
900  if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
901  const uint8_t * const d64 = dither_8x8_73[y & 7];
902  const uint8_t * const d32 = dither_8x8_32[y & 7];
903  dr1 = dg1 = d32[(i * 2 + 0) & 7];
904  db1 = d64[(i * 2 + 0) & 7];
905  dr2 = dg2 = d32[(i * 2 + 1) & 7];
906  db2 = d64[(i * 2 + 1) & 7];
907  } else {
908  const uint8_t * const d64 = dither_8x8_73 [y & 7];
909  const uint8_t * const d128 = dither_8x8_220[y & 7];
910  dr1 = db1 = d128[(i * 2 + 0) & 7];
911  dg1 = d64[(i * 2 + 0) & 7];
912  dr2 = db2 = d128[(i * 2 + 1) & 7];
913  dg2 = d64[(i * 2 + 1) & 7];
914  }
915 
916  if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
917  dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
918  ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
919  } else {
920  dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
921  dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
922  }
923  }
924 }
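/* Usage pattern (as visible in the callers below): the chroma values select
 *   r = c->table_rV[V], g = c->table_gU[U] + c->table_gV[V], b = c->table_bU[U]
 * so each pointer is a row of precomputed component contributions already
 * shifted into their final bit positions; yuv2rgb_write() then only indexes
 * those rows by luma (plus a small per-pixel dither offset for the <= 16 bpp
 * formats) and sums or stores the results. */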
925 
926 static av_always_inline void
927 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
928  const int16_t **lumSrc, int lumFilterSize,
929  const int16_t *chrFilter, const int16_t **chrUSrc,
930  const int16_t **chrVSrc, int chrFilterSize,
931  const int16_t **alpSrc, uint8_t *dest, int dstW,
932  int y, enum AVPixelFormat target, int hasAlpha)
933 {
934  int i;
935 
936  for (i = 0; i < ((dstW + 1) >> 1); i++) {
937  int j, A1, A2;
938  int Y1 = 1 << 18;
939  int Y2 = 1 << 18;
940  int U = 1 << 18;
941  int V = 1 << 18;
942  const void *r, *g, *b;
943 
944  for (j = 0; j < lumFilterSize; j++) {
945  Y1 += lumSrc[j][i * 2] * lumFilter[j];
946  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
947  }
948  for (j = 0; j < chrFilterSize; j++) {
949  U += chrUSrc[j][i] * chrFilter[j];
950  V += chrVSrc[j][i] * chrFilter[j];
951  }
952  Y1 >>= 19;
953  Y2 >>= 19;
954  U >>= 19;
955  V >>= 19;
956  if ((Y1 | Y2 | U | V) & 0x100) {
957  Y1 = av_clip_uint8(Y1);
958  Y2 = av_clip_uint8(Y2);
959  U = av_clip_uint8(U);
960  V = av_clip_uint8(V);
961  }
962  if (hasAlpha) {
963  A1 = 1 << 18;
964  A2 = 1 << 18;
965  for (j = 0; j < lumFilterSize; j++) {
966  A1 += alpSrc[j][i * 2 ] * lumFilter[j];
967  A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
968  }
969  A1 >>= 19;
970  A2 >>= 19;
971  if ((A1 | A2) & 0x100) {
972  A1 = av_clip_uint8(A1);
973  A2 = av_clip_uint8(A2);
974  }
975  }
976 
977  /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/
978  r = c->table_rV[V];
979  g = (c->table_gU[U] + c->table_gV[V]);
980  b = c->table_bU[U];
981 
982  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
983  r, g, b, y, target, hasAlpha);
984  }
985 }
986 
987 static av_always_inline void
988 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
989  const int16_t *ubuf[2], const int16_t *vbuf[2],
990  const int16_t *abuf[2], uint8_t *dest, int dstW,
991  int yalpha, int uvalpha, int y,
992  enum AVPixelFormat target, int hasAlpha)
993 {
994  const int16_t *buf0 = buf[0], *buf1 = buf[1],
995  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
996  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
997  *abuf0 = hasAlpha ? abuf[0] : NULL,
998  *abuf1 = hasAlpha ? abuf[1] : NULL;
999  int yalpha1 = 4096 - yalpha;
1000  int uvalpha1 = 4096 - uvalpha;
1001  int i;
1002 
1003  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1004  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1005  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1006  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1007  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1008  int A1, A2;
1009  const void *r, *g, *b;
1010 
1011  Y1 = av_clip_uint8(Y1);
1012  Y2 = av_clip_uint8(Y2);
1013  U = av_clip_uint8(U);
1014  V = av_clip_uint8(V);
1015 
1016  r = c->table_rV[V];
1017  g = (c->table_gU[U] + c->table_gV[V]);
1018  b = c->table_bU[U];
1019 
1020  if (hasAlpha) {
1021  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1022  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1023  A1 = av_clip_uint8(A1);
1024  A2 = av_clip_uint8(A2);
1025  }
1026 
1027  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1028  r, g, b, y, target, hasAlpha);
1029  }
1030 }
1031 
1032 static av_always_inline void
1033 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1034  const int16_t *ubuf[2], const int16_t *vbuf[2],
1035  const int16_t *abuf0, uint8_t *dest, int dstW,
1036  int uvalpha, int y, enum AVPixelFormat target,
1037  int hasAlpha)
1038 {
1039  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1040  int i;
1041 
1042  if (uvalpha < 2048) {
1043  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1044  int Y1 = buf0[i * 2] >> 7;
1045  int Y2 = buf0[i * 2 + 1] >> 7;
1046  int U = ubuf0[i] >> 7;
1047  int V = vbuf0[i] >> 7;
1048  int A1, A2;
1049  const void *r, *g, *b;
1050 
1051  Y1 = av_clip_uint8(Y1);
1052  Y2 = av_clip_uint8(Y2);
1053  U = av_clip_uint8(U);
1054  V = av_clip_uint8(V);
1055 
1056  r = c->table_rV[V];
1057  g = (c->table_gU[U] + c->table_gV[V]);
1058  b = c->table_bU[U];
1059 
1060  if (hasAlpha) {
1061  A1 = abuf0[i * 2 ] >> 7;
1062  A2 = abuf0[i * 2 + 1] >> 7;
1063  A1 = av_clip_uint8(A1);
1064  A2 = av_clip_uint8(A2);
1065  }
1066 
1067  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1068  r, g, b, y, target, hasAlpha);
1069  }
1070  } else {
1071  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1072  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1073  int Y1 = buf0[i * 2] >> 7;
1074  int Y2 = buf0[i * 2 + 1] >> 7;
1075  int U = (ubuf0[i] + ubuf1[i]) >> 8;
1076  int V = (vbuf0[i] + vbuf1[i]) >> 8;
1077  int A1, A2;
1078  const void *r, *g, *b;
1079 
1080  Y1 = av_clip_uint8(Y1);
1081  Y2 = av_clip_uint8(Y2);
1082  U = av_clip_uint8(U);
1083  V = av_clip_uint8(V);
1084 
1085  r = c->table_rV[V];
1086  g = (c->table_gU[U] + c->table_gV[V]);
1087  b = c->table_bU[U];
1088 
1089  if (hasAlpha) {
1090  A1 = abuf0[i * 2 ] >> 7;
1091  A2 = abuf0[i * 2 + 1] >> 7;
1092  A1 = av_clip_uint8(A1);
1093  A2 = av_clip_uint8(A2);
1094  }
1095 
1096  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1097  r, g, b, y, target, hasAlpha);
1098  }
1099  }
1100 }
1101 
1102 #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1103 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
1104  const int16_t **lumSrc, int lumFilterSize, \
1105  const int16_t *chrFilter, const int16_t **chrUSrc, \
1106  const int16_t **chrVSrc, int chrFilterSize, \
1107  const int16_t **alpSrc, uint8_t *dest, int dstW, \
1108  int y) \
1109 { \
1110  name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
1111  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
1112  alpSrc, dest, dstW, y, fmt, hasAlpha); \
1113 }
1114 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
1115 YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1116 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
1117  const int16_t *ubuf[2], const int16_t *vbuf[2], \
1118  const int16_t *abuf[2], uint8_t *dest, int dstW, \
1119  int yalpha, int uvalpha, int y) \
1120 { \
1121  name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
1122  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
1123 } \
1124  \
1125 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
1126  const int16_t *ubuf[2], const int16_t *vbuf[2], \
1127  const int16_t *abuf0, uint8_t *dest, int dstW, \
1128  int uvalpha, int y) \
1129 { \
1130  name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
1131  dstW, uvalpha, y, fmt, hasAlpha); \
1132 }
1133 
1134 #if CONFIG_SMALL
1135 YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1136 YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1137 #else
1138 #if CONFIG_SWSCALE_ALPHA
1139 YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1, 1)
1140 YUV2RGBWRAPPER(yuv2rgb,, a32, AV_PIX_FMT_RGB32, 1)
1141 #endif
1142 YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1, 0)
1143 YUV2RGBWRAPPER(yuv2rgb,, x32, AV_PIX_FMT_RGB32, 0)
1144 #endif
1145 YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
1146 YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
1147 YUV2RGBWRAPPER(yuv2rgb,, 16, AV_PIX_FMT_RGB565, 0)
1148 YUV2RGBWRAPPER(yuv2rgb,, 15, AV_PIX_FMT_RGB555, 0)
1149 YUV2RGBWRAPPER(yuv2rgb,, 12, AV_PIX_FMT_RGB444, 0)
1150 YUV2RGBWRAPPER(yuv2rgb,, 8, AV_PIX_FMT_RGB8, 0)
1151 YUV2RGBWRAPPER(yuv2rgb,, 4, AV_PIX_FMT_RGB4, 0)
1152 YUV2RGBWRAPPER(yuv2rgb,, 4b, AV_PIX_FMT_RGB4_BYTE, 0)
1153 
1154 static av_always_inline void
1155 yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1156  const int16_t **lumSrc, int lumFilterSize,
1157  const int16_t *chrFilter, const int16_t **chrUSrc,
1158  const int16_t **chrVSrc, int chrFilterSize,
1159  const int16_t **alpSrc, uint8_t *dest,
1160  int dstW, int y, enum AVPixelFormat target, int hasAlpha)
1161 {
1162  int i;
1163  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1164 
1165  for (i = 0; i < dstW; i++) {
1166  int j;
1167  int Y = 0;
1168  int U = -128 << 19;
1169  int V = -128 << 19;
1170  int R, G, B, A;
1171 
1172  for (j = 0; j < lumFilterSize; j++) {
1173  Y += lumSrc[j][i] * lumFilter[j];
1174  }
1175  for (j = 0; j < chrFilterSize; j++) {
1176  U += chrUSrc[j][i] * chrFilter[j];
1177  V += chrVSrc[j][i] * chrFilter[j];
1178  }
1179  Y >>= 10;
1180  U >>= 10;
1181  V >>= 10;
1182  if (hasAlpha) {
1183  A = 1 << 21;
1184  for (j = 0; j < lumFilterSize; j++) {
1185  A += alpSrc[j][i] * lumFilter[j];
1186  }
1187  A >>= 19;
1188  if (A & 0x100)
1189  A = av_clip_uint8(A);
1190  }
1191  Y -= c->yuv2rgb_y_offset;
1192  Y *= c->yuv2rgb_y_coeff;
1193  Y += 1 << 21;
1194  R = Y + V*c->yuv2rgb_v2r_coeff;
1195  G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1196  B = Y + U*c->yuv2rgb_u2b_coeff;
1197  if ((R | G | B) & 0xC0000000) {
1198  R = av_clip_uintp2(R, 30);
1199  G = av_clip_uintp2(G, 30);
1200  B = av_clip_uintp2(B, 30);
1201  }
1202 
1203  switch(target) {
1204  case AV_PIX_FMT_ARGB:
1205  dest[0] = hasAlpha ? A : 255;
1206  dest[1] = R >> 22;
1207  dest[2] = G >> 22;
1208  dest[3] = B >> 22;
1209  break;
1210  case AV_PIX_FMT_RGB24:
1211  dest[0] = R >> 22;
1212  dest[1] = G >> 22;
1213  dest[2] = B >> 22;
1214  break;
1215  case AV_PIX_FMT_RGBA:
1216  dest[0] = R >> 22;
1217  dest[1] = G >> 22;
1218  dest[2] = B >> 22;
1219  dest[3] = hasAlpha ? A : 255;
1220  break;
1221  case AV_PIX_FMT_ABGR:
1222  dest[0] = hasAlpha ? A : 255;
1223  dest[1] = B >> 22;
1224  dest[2] = G >> 22;
1225  dest[3] = R >> 22;
1226  dest += 4;
1227  break;
1228  case AV_PIX_FMT_BGR24:
1229  dest[0] = B >> 22;
1230  dest[1] = G >> 22;
1231  dest[2] = R >> 22;
1232  break;
1233  case AV_PIX_FMT_BGRA:
1234  dest[0] = B >> 22;
1235  dest[1] = G >> 22;
1236  dest[2] = R >> 22;
1237  dest[3] = hasAlpha ? A : 255;
1238  break;
1239  }
1240  dest += step;
1241  }
1242 }
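/* Precision trail for the full-chroma-resolution writer above: as elsewhere
 * in this file the samples carry 7 fractional bits and the filter
 * coefficients 12, so the sums hold 19 fractional bits; ">> 10" leaves
 * 17-bit values, the yuv2rgb_* coefficients add roughly 13 more (hence the
 * clip to 30 bits), and ">> 22" produces the final 8-bit R/G/B stored in the
 * order required by each target format. */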
1243 
1244 #if CONFIG_SMALL
1245 YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1246 YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1247 YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1248 YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
1249 #else
1250 #if CONFIG_SWSCALE_ALPHA
1251 YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
1252 YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
1253 YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
1254 YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
1255 #endif
1256 YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
1257 YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
1258 YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
1259 YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
1260 #endif
1261 YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
1262 YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
1263 
1264 av_cold void ff_sws_init_output_funcs(SwsContext *c,
1265  yuv2planar1_fn *yuv2plane1,
1266  yuv2planarX_fn *yuv2planeX,
1267  yuv2interleavedX_fn *yuv2nv12cX,
1268  yuv2packed1_fn *yuv2packed1,
1269  yuv2packed2_fn *yuv2packed2,
1270  yuv2packedX_fn *yuv2packedX)
1271 {
1272  enum AVPixelFormat dstFormat = c->dstFormat;
1273  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
1274 
1275  if (is16BPS(dstFormat)) {
1276  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
1277  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
1278  } else if (is9_OR_10BPS(dstFormat)) {
1279  if (desc->comp[0].depth_minus1 == 8) {
1280  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
1281  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
1282  } else {
1283  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
1284  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
1285  }
1286  } else {
1287  *yuv2plane1 = yuv2plane1_8_c;
1288  *yuv2planeX = yuv2planeX_8_c;
1289  if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)
1290  *yuv2nv12cX = yuv2nv12cX_c;
1291  }
1292 
1293  if(c->flags & SWS_FULL_CHR_H_INT) {
1294  switch (dstFormat) {
1295  case AV_PIX_FMT_RGBA:
1296 #if CONFIG_SMALL
1297  *yuv2packedX = yuv2rgba32_full_X_c;
1298 #else
1299 #if CONFIG_SWSCALE_ALPHA
1300  if (c->alpPixBuf) {
1301  *yuv2packedX = yuv2rgba32_full_X_c;
1302  } else
1303 #endif /* CONFIG_SWSCALE_ALPHA */
1304  {
1305  *yuv2packedX = yuv2rgbx32_full_X_c;
1306  }
1307 #endif /* !CONFIG_SMALL */
1308  break;
1309  case AV_PIX_FMT_ARGB:
1310 #if CONFIG_SMALL
1311  *yuv2packedX = yuv2argb32_full_X_c;
1312 #else
1313 #if CONFIG_SWSCALE_ALPHA
1314  if (c->alpPixBuf) {
1315  *yuv2packedX = yuv2argb32_full_X_c;
1316  } else
1317 #endif /* CONFIG_SWSCALE_ALPHA */
1318  {
1319  *yuv2packedX = yuv2xrgb32_full_X_c;
1320  }
1321 #endif /* !CONFIG_SMALL */
1322  break;
1323  case AV_PIX_FMT_BGRA:
1324 #if CONFIG_SMALL
1325  *yuv2packedX = yuv2bgra32_full_X_c;
1326 #else
1327 #if CONFIG_SWSCALE_ALPHA
1328  if (c->alpPixBuf) {
1329  *yuv2packedX = yuv2bgra32_full_X_c;
1330  } else
1331 #endif /* CONFIG_SWSCALE_ALPHA */
1332  {
1333  *yuv2packedX = yuv2bgrx32_full_X_c;
1334  }
1335 #endif /* !CONFIG_SMALL */
1336  break;
1337  case AV_PIX_FMT_ABGR:
1338 #if CONFIG_SMALL
1339  *yuv2packedX = yuv2abgr32_full_X_c;
1340 #else
1341 #if CONFIG_SWSCALE_ALPHA
1342  if (c->alpPixBuf) {
1343  *yuv2packedX = yuv2abgr32_full_X_c;
1344  } else
1345 #endif /* CONFIG_SWSCALE_ALPHA */
1346  {
1347  *yuv2packedX = yuv2xbgr32_full_X_c;
1348  }
1349 #endif /* !CONFIG_SMALL */
1350  break;
1351  case AV_PIX_FMT_RGB24:
1352  *yuv2packedX = yuv2rgb24_full_X_c;
1353  break;
1354  case AV_PIX_FMT_BGR24:
1355  *yuv2packedX = yuv2bgr24_full_X_c;
1356  break;
1357  }
1358  } else {
1359  switch (dstFormat) {
1360  case AV_PIX_FMT_RGB48LE:
1361  *yuv2packed1 = yuv2rgb48le_1_c;
1362  *yuv2packed2 = yuv2rgb48le_2_c;
1363  *yuv2packedX = yuv2rgb48le_X_c;
1364  break;
1365  case AV_PIX_FMT_RGB48BE:
1366  *yuv2packed1 = yuv2rgb48be_1_c;
1367  *yuv2packed2 = yuv2rgb48be_2_c;
1368  *yuv2packedX = yuv2rgb48be_X_c;
1369  break;
1370  case AV_PIX_FMT_BGR48LE:
1371  *yuv2packed1 = yuv2bgr48le_1_c;
1372  *yuv2packed2 = yuv2bgr48le_2_c;
1373  *yuv2packedX = yuv2bgr48le_X_c;
1374  break;
1375  case AV_PIX_FMT_BGR48BE:
1376  *yuv2packed1 = yuv2bgr48be_1_c;
1377  *yuv2packed2 = yuv2bgr48be_2_c;
1378  *yuv2packedX = yuv2bgr48be_X_c;
1379  break;
1380  case AV_PIX_FMT_RGB32:
1381  case AV_PIX_FMT_BGR32:
1382 #if CONFIG_SMALL
1383  *yuv2packed1 = yuv2rgb32_1_c;
1384  *yuv2packed2 = yuv2rgb32_2_c;
1385  *yuv2packedX = yuv2rgb32_X_c;
1386 #else
1387 #if CONFIG_SWSCALE_ALPHA
1388  if (c->alpPixBuf) {
1389  *yuv2packed1 = yuv2rgba32_1_c;
1390  *yuv2packed2 = yuv2rgba32_2_c;
1391  *yuv2packedX = yuv2rgba32_X_c;
1392  } else
1393 #endif /* CONFIG_SWSCALE_ALPHA */
1394  {
1395  *yuv2packed1 = yuv2rgbx32_1_c;
1396  *yuv2packed2 = yuv2rgbx32_2_c;
1397  *yuv2packedX = yuv2rgbx32_X_c;
1398  }
1399 #endif /* !CONFIG_SMALL */
1400  break;
1401  case AV_PIX_FMT_RGB32_1:
1402  case AV_PIX_FMT_BGR32_1:
1403 #if CONFIG_SMALL
1404  *yuv2packed1 = yuv2rgb32_1_1_c;
1405  *yuv2packed2 = yuv2rgb32_1_2_c;
1406  *yuv2packedX = yuv2rgb32_1_X_c;
1407 #else
1408 #if CONFIG_SWSCALE_ALPHA
1409  if (c->alpPixBuf) {
1410  *yuv2packed1 = yuv2rgba32_1_1_c;
1411  *yuv2packed2 = yuv2rgba32_1_2_c;
1412  *yuv2packedX = yuv2rgba32_1_X_c;
1413  } else
1414 #endif /* CONFIG_SWSCALE_ALPHA */
1415  {
1416  *yuv2packed1 = yuv2rgbx32_1_1_c;
1417  *yuv2packed2 = yuv2rgbx32_1_2_c;
1418  *yuv2packedX = yuv2rgbx32_1_X_c;
1419  }
1420 #endif /* !CONFIG_SMALL */
1421  break;
1422  case AV_PIX_FMT_RGB24:
1423  *yuv2packed1 = yuv2rgb24_1_c;
1424  *yuv2packed2 = yuv2rgb24_2_c;
1425  *yuv2packedX = yuv2rgb24_X_c;
1426  break;
1427  case AV_PIX_FMT_BGR24:
1428  *yuv2packed1 = yuv2bgr24_1_c;
1429  *yuv2packed2 = yuv2bgr24_2_c;
1430  *yuv2packedX = yuv2bgr24_X_c;
1431  break;
1432  case AV_PIX_FMT_RGB565LE:
1433  case AV_PIX_FMT_RGB565BE:
1434  case AV_PIX_FMT_BGR565LE:
1435  case AV_PIX_FMT_BGR565BE:
1436  *yuv2packed1 = yuv2rgb16_1_c;
1437  *yuv2packed2 = yuv2rgb16_2_c;
1438  *yuv2packedX = yuv2rgb16_X_c;
1439  break;
1440  case AV_PIX_FMT_RGB555LE:
1441  case AV_PIX_FMT_RGB555BE:
1442  case AV_PIX_FMT_BGR555LE:
1443  case AV_PIX_FMT_BGR555BE:
1444  *yuv2packed1 = yuv2rgb15_1_c;
1445  *yuv2packed2 = yuv2rgb15_2_c;
1446  *yuv2packedX = yuv2rgb15_X_c;
1447  break;
1448  case AV_PIX_FMT_RGB444LE:
1449  case AV_PIX_FMT_RGB444BE:
1450  case AV_PIX_FMT_BGR444LE:
1451  case AV_PIX_FMT_BGR444BE:
1452  *yuv2packed1 = yuv2rgb12_1_c;
1453  *yuv2packed2 = yuv2rgb12_2_c;
1454  *yuv2packedX = yuv2rgb12_X_c;
1455  break;
1456  case AV_PIX_FMT_RGB8:
1457  case AV_PIX_FMT_BGR8:
1458  *yuv2packed1 = yuv2rgb8_1_c;
1459  *yuv2packed2 = yuv2rgb8_2_c;
1460  *yuv2packedX = yuv2rgb8_X_c;
1461  break;
1462  case AV_PIX_FMT_RGB4:
1463  case AV_PIX_FMT_BGR4:
1464  *yuv2packed1 = yuv2rgb4_1_c;
1465  *yuv2packed2 = yuv2rgb4_2_c;
1466  *yuv2packedX = yuv2rgb4_X_c;
1467  break;
1468  case AV_PIX_FMT_RGB4_BYTE:
1469  case AV_PIX_FMT_BGR4_BYTE:
1470  *yuv2packed1 = yuv2rgb4b_1_c;
1471  *yuv2packed2 = yuv2rgb4b_2_c;
1472  *yuv2packedX = yuv2rgb4b_X_c;
1473  break;
1474  }
1475  }
1476  switch (dstFormat) {
1477  case AV_PIX_FMT_MONOWHITE:
1478  *yuv2packed1 = yuv2monowhite_1_c;
1479  *yuv2packed2 = yuv2monowhite_2_c;
1480  *yuv2packedX = yuv2monowhite_X_c;
1481  break;
1482  case AV_PIX_FMT_MONOBLACK:
1483  *yuv2packed1 = yuv2monoblack_1_c;
1484  *yuv2packed2 = yuv2monoblack_2_c;
1485  *yuv2packedX = yuv2monoblack_X_c;
1486  break;
1487  case AV_PIX_FMT_YUYV422:
1488  *yuv2packed1 = yuv2yuyv422_1_c;
1489  *yuv2packed2 = yuv2yuyv422_2_c;
1490  *yuv2packedX = yuv2yuyv422_X_c;
1491  break;
1492  case AV_PIX_FMT_UYVY422:
1493  *yuv2packed1 = yuv2uyvy422_1_c;
1494  *yuv2packed2 = yuv2uyvy422_2_c;
1495  *yuv2packedX = yuv2uyvy422_X_c;
1496  break;
1497  }
1498 }
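/* Minimal usage sketch for the dispatcher above (illustrative only; in the
 * library it is invoked from the swscale init path with a fully initialized
 * SwsContext, which is simply assumed here, and the helper name below is
 * hypothetical):
 */
#if 0
static void pick_output_funcs_example(SwsContext *c)
{
    yuv2planar1_fn      yuv2plane1;
    yuv2planarX_fn      yuv2planeX;
    yuv2interleavedX_fn yuv2nv12cX;
    yuv2packed1_fn      yuv2packed1;
    yuv2packed2_fn      yuv2packed2;
    yuv2packedX_fn      yuv2packedX;

    ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
                             &yuv2packed1, &yuv2packed2, &yuv2packedX);
    /* the vertical scaler then calls, e.g., yuv2planeX() or yuv2packedX()
     * once per output line, depending on dstFormat */
}
#endif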