huffyuv.c
1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of Libav.
10  *
11  * Libav is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * Libav is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with Libav; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 /**
27  * @file
28  * huffyuv codec for libavcodec
29  */
30 
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36 
37 #define VLC_BITS 11
38 
39 #if HAVE_BIGENDIAN
40 #define B 3
41 #define G 2
42 #define R 1
43 #define A 0
44 #else
45 #define B 0
46 #define G 1
47 #define R 2
48 #define A 3
49 #endif
50 
51 typedef enum Predictor{
52  LEFT= 0,
53  PLANE,
54  MEDIAN,
55 } Predictor;
56 
57 typedef struct HYuvContext{
58  AVCodecContext *avctx;
59  Predictor predictor;
60  GetBitContext gb;
61  PutBitContext pb;
62  int interlaced;
63  int decorrelate;
64  int bitstream_bpp;
65  int version;
66  int yuy2; //use yuy2 instead of 422P
67  int bgr32; //use bgr32 instead of bgr24
68  int width, height;
69  int flags;
70  int context;
71  int picture_number;
72  int last_slice_end;
73  uint8_t *temp[3];
74  uint64_t stats[3][256];
75  uint8_t len[3][256];
76  uint32_t bits[3][256];
77  uint32_t pix_bgr_map[1<<VLC_BITS];
78  VLC vlc[6]; //Y,U,V,YY,YU,YV
79  AVFrame picture;
80  uint8_t *bitstream_buffer;
81  unsigned int bitstream_buffer_size;
82  DSPContext dsp;
83 }HYuvContext;
84 
85 #define classic_shift_luma_table_size 42
86 static const unsigned char classic_shift_luma[] = {
87  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
88  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
89  69,68, 0
90 };
91 
92 #define classic_shift_chroma_table_size 59
93 static const unsigned char classic_shift_chroma[] = {
94  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
95  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
96  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
97 };
98 
99 static const unsigned char classic_add_luma[256] = {
100  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
101  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
102  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
103  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
104  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
105  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
106  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
107  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
108  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
109  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
110  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
111  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
112  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
113  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
114  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
115  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
116 };
117 
118 static const unsigned char classic_add_chroma[256] = {
119  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
120  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
121  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
122  43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
123  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
124  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
125  17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
126  112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
127  0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
128  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
129  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
130  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
131  7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
132  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
133  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
134  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
135 };
136 
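/* Left prediction: each output byte is the difference between a sample and its
 * left neighbour (modulo 256), e.g. src = {10, 12, 11} with left = 0 gives
 * dst = {10, 2, 255}. The first 16 samples are differenced in scalar code,
 * presumably so the (possibly SIMD) dsp.diff_bytes() call starts at dst+16;
 * the return value is the last source sample, which seeds "left" for the next call. */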
137 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
138  int i;
139  if(w<32){
140  for(i=0; i<w; i++){
141  const int temp= src[i];
142  dst[i]= temp - left;
143  left= temp;
144  }
145  return left;
146  }else{
147  for(i=0; i<16; i++){
148  const int temp= src[i];
149  dst[i]= temp - left;
150  left= temp;
151  }
152  s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
153  return src[w-1];
154  }
155 }
156 
157 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
158  int i;
159  int r,g,b;
160  r= *red;
161  g= *green;
162  b= *blue;
163  for(i=0; i<FFMIN(w,4); i++){
164  const int rt= src[i*4+R];
165  const int gt= src[i*4+G];
166  const int bt= src[i*4+B];
167  dst[i*4+R]= rt - r;
168  dst[i*4+G]= gt - g;
169  dst[i*4+B]= bt - b;
170  r = rt;
171  g = gt;
172  b = bt;
173  }
174  s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
175  *red= src[(w-1)*4+R];
176  *green= src[(w-1)*4+G];
177  *blue= src[(w-1)*4+B];
178 }
179 
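/* The code-length table is run-length coded: each group starts with a 3-bit
 * repeat count followed by a 5-bit length value; a repeat of 0 means the real
 * repeat count follows as an 8-bit value. Exactly 256 lengths must be produced,
 * and overruns or reading past the end of the buffer are treated as errors. */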
180 static int read_len_table(uint8_t *dst, GetBitContext *gb){
181  int i, val, repeat;
182 
183  for(i=0; i<256;){
184  repeat= get_bits(gb, 3);
185  val = get_bits(gb, 5);
186  if(repeat==0)
187  repeat= get_bits(gb, 8);
188 //printf("%d %d\n", val, repeat);
189  if(i+repeat > 256 || get_bits_left(gb) < 0) {
190  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
191  return -1;
192  }
193  while (repeat--)
194  dst[i++] = val;
195  }
196  return 0;
197 }
198 
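/* Builds canonical Huffman codes from the length table, starting with the
 * longest codes. Example: lengths {1,2,3,3} for symbols a,b,c,d yield bits
 * {1,1,0,1}, i.e. codes a="1", b="01", c="000", d="001". After each length
 * level the running counter must be even (otherwise the lengths would not
 * form a consistent prefix code) and is shifted right by one. */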
199 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
200  int len, index;
201  uint32_t bits=0;
202 
203  for(len=32; len>0; len--){
204  for(index=0; index<256; index++){
205  if(len_table[index]==len)
206  dst[index]= bits++;
207  }
208  if(bits & 1){
209  av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
210  return -1;
211  }
212  bits >>= 1;
213  }
214  return 0;
215 }
216 
217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
218 typedef struct {
219  uint64_t val;
220  int name;
221 } HeapElem;
222 
223 static void heap_sift(HeapElem *h, int root, int size)
224 {
225  while(root*2+1 < size) {
226  int child = root*2+1;
227  if(child < size-1 && h[child].val > h[child+1].val)
228  child++;
229  if(h[root].val > h[child].val) {
230  FFSWAP(HeapElem, h[root], h[child]);
231  root = child;
232  } else
233  break;
234  }
235 }
236 
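/* Computes Huffman code lengths from symbol counts using a binary min-heap to
 * repeatedly merge the two least frequent nodes. Each count is scaled by 256
 * and biased by "offset"; if any resulting length reaches 32 bits, the whole
 * table is rebuilt with a doubled offset, which flattens the distribution
 * until every code fits in 31 bits. */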
237 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
238  HeapElem h[256];
239  int up[2*256];
240  int len[2*256];
241  int offset, i, next;
242  int size = 256;
243 
244  for(offset=1; ; offset<<=1){
245  for(i=0; i<size; i++){
246  h[i].name = i;
247  h[i].val = (stats[i] << 8) + offset;
248  }
249  for(i=size/2-1; i>=0; i--)
250  heap_sift(h, i, size);
251 
252  for(next=size; next<size*2-1; next++){
253  // merge the two smallest entries and put the result back into the heap
254  uint64_t min1v = h[0].val;
255  up[h[0].name] = next;
256  h[0].val = INT64_MAX;
257  heap_sift(h, 0, size);
258  up[h[0].name] = next;
259  h[0].name = next;
260  h[0].val += min1v;
261  heap_sift(h, 0, size);
262  }
263 
264  len[2*size-2] = 0;
265  for(i=2*size-3; i>=size; i--)
266  len[i] = len[up[i]] + 1;
267  for(i=0; i<size; i++) {
268  dst[i] = len[up[i]] + 1;
269  if(dst[i] >= 32) break;
270  }
271  if(i==size) break;
272  }
273 }
274 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
275 
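/* Builds joint VLC tables so one get_vlc2() lookup can decode more than one
 * symbol at a time: for YUV modes, every (luma, chroma) pair whose combined
 * length fits in VLC_BITS, with the symbol value 0xffff reserved to mean
 * "no joint code"; for RGB modes, small (g, b, r) combinations are packed
 * together with their reconstructed pixel value in pix_bgr_map. */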
276 static int generate_joint_tables(HYuvContext *s){
277  uint16_t symbols[1<<VLC_BITS];
278  uint16_t bits[1<<VLC_BITS];
279  uint8_t len[1<<VLC_BITS];
280  int ret;
281 
282  if(s->bitstream_bpp < 24){
283  int p, i, y, u;
284  for(p=0; p<3; p++){
285  for(i=y=0; y<256; y++){
286  int len0 = s->len[0][y];
287  int limit = VLC_BITS - len0;
288  if(limit <= 0)
289  continue;
290  for(u=0; u<256; u++){
291  int len1 = s->len[p][u];
292  if(len1 > limit)
293  continue;
294  len[i] = len0 + len1;
295  bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
296  symbols[i] = (y<<8) + u;
297  if(symbols[i] != 0xffff) // reserved to mean "invalid"
298  i++;
299  }
300  }
301  ff_free_vlc(&s->vlc[3+p]);
302  if ((ret = ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
303  bits, 2, 2, symbols, 2, 2, 0)) < 0)
304  return ret;
305  }
306  }else{
307  uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
308  int i, b, g, r, code;
309  int p0 = s->decorrelate;
310  int p1 = !s->decorrelate;
311  // restrict the range to +/-16 because that's pretty much guaranteed to
312  // cover all the combinations that fit in 11 bits total, and it doesn't
313  // matter if we miss a few rare codes.
314  for(i=0, g=-16; g<16; g++){
315  int len0 = s->len[p0][g&255];
316  int limit0 = VLC_BITS - len0;
317  if(limit0 < 2)
318  continue;
319  for(b=-16; b<16; b++){
320  int len1 = s->len[p1][b&255];
321  int limit1 = limit0 - len1;
322  if(limit1 < 1)
323  continue;
324  code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
325  for(r=-16; r<16; r++){
326  int len2 = s->len[2][r&255];
327  if(len2 > limit1)
328  continue;
329  len[i] = len0 + len1 + len2;
330  bits[i] = (code << len2) + s->bits[2][r&255];
331  if(s->decorrelate){
332  map[i][G] = g;
333  map[i][B] = g+b;
334  map[i][R] = g+r;
335  }else{
336  map[i][B] = g;
337  map[i][G] = b;
338  map[i][R] = r;
339  }
340  i++;
341  }
342  }
343  }
344  ff_free_vlc(&s->vlc[3]);
345  if ((ret = init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1,
346  bits, 2, 2, 0)) < 0)
347  return ret;
348  }
349  return 0;
350 }
351 
352 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
353  GetBitContext gb;
354  int i, ret;
355 
356  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
357  return ret;
358 
359  for(i=0; i<3; i++){
360  if ((ret = read_len_table(s->len[i], &gb)) < 0)
361  return ret;
362  if ((ret = generate_bits_table(s->bits[i], s->len[i])) < 0)
363  return ret;
364  ff_free_vlc(&s->vlc[i]);
365  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
366  s->bits[i], 4, 4, 0)) < 0)
367  return ret;
368  }
369 
370  if ((ret = generate_joint_tables(s)) < 0)
371  return ret;
372 
373  return (get_bits_count(&gb)+7)/8;
374 }
375 
376 static int read_old_huffman_tables(HYuvContext *s){
377 #if 1
378  GetBitContext gb;
379  int i, ret;
380 
381  if ((ret = init_get_bits(&gb, classic_shift_luma,
382  classic_shift_luma_table_size * 8)) < 0)
383  return ret;
384  if ((ret = read_len_table(s->len[0], &gb)) < 0)
385  return ret;
386  if ((ret = init_get_bits(&gb, classic_shift_chroma,
387  classic_shift_chroma_table_size * 8)) < 0)
388  return ret;
389  if ((ret = read_len_table(s->len[1], &gb)) < 0)
390  return ret;
391 
392  for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
393  for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
394 
395  if(s->bitstream_bpp >= 24){
396  memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
397  memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
398  }
399  memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
400  memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
401 
402  for(i=0; i<3; i++){
403  ff_free_vlc(&s->vlc[i]);
404  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
405  s->bits[i], 4, 4, 0)) < 0)
406  return ret;
407  }
408 
409  if ((ret = generate_joint_tables(s)) < 0)
410  return ret;
411 
412  return 0;
413 #else
414  av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
415  return -1;
416 #endif
417 }
418 
419 static av_cold void alloc_temp(HYuvContext *s){
420  int i;
421 
422  if(s->bitstream_bpp<24){
423  for(i=0; i<3; i++){
424  s->temp[i]= av_malloc(s->width + 16);
425  }
426  }else{
427  s->temp[0]= av_mallocz(4*s->width + 16);
428  }
429 }
430 
431 static av_cold int common_init(AVCodecContext *avctx){
432  HYuvContext *s = avctx->priv_data;
433 
434  s->avctx= avctx;
435  s->flags= avctx->flags;
436 
437  dsputil_init(&s->dsp, avctx);
438 
439  s->width= avctx->width;
440  s->height= avctx->height;
441  assert(s->width>0 && s->height>0);
442 
443  return 0;
444 }
445 
446 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
447 static av_cold int decode_init(AVCodecContext *avctx)
448 {
449  HYuvContext *s = avctx->priv_data;
450  int ret;
451 
452  common_init(avctx);
453  memset(s->vlc, 0, 3*sizeof(VLC));
454 
455  avctx->coded_frame= &s->picture;
456  s->interlaced= s->height > 288;
457 
458 s->bgr32=1;
459 //if(avctx->extradata)
460 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
461  if(avctx->extradata_size){
462  if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
463  s->version=1; // do such files exist at all?
464  else
465  s->version=2;
466  }else
467  s->version=0;
468 
469  if(s->version==2){
470  int method, interlace;
471 
472  if (avctx->extradata_size < 4)
473  return -1;
474 
475  method= ((uint8_t*)avctx->extradata)[0];
476  s->decorrelate= method&64 ? 1 : 0;
477  s->predictor= method&63;
478  s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
479  if(s->bitstream_bpp==0)
480  s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
481  interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
482  s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
483  s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
484 
485  if ((ret = read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
486  avctx->extradata_size - 4)) < 0)
487  return ret;
488  }else{
489  switch(avctx->bits_per_coded_sample&7){
490  case 1:
491  s->predictor= LEFT;
492  s->decorrelate= 0;
493  break;
494  case 2:
495  s->predictor= LEFT;
496  s->decorrelate= 1;
497  break;
498  case 3:
499  s->predictor= PLANE;
500  s->decorrelate= avctx->bits_per_coded_sample >= 24;
501  break;
502  case 4:
503  s->predictor= MEDIAN;
504  s->decorrelate= 0;
505  break;
506  default:
507  s->predictor= LEFT; //OLD
508  s->decorrelate= 0;
509  break;
510  }
511  s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
512  s->context= 0;
513 
514  if ((ret = read_old_huffman_tables(s)) < 0)
515  return ret;
516  }
517 
518  switch(s->bitstream_bpp){
519  case 12:
520  avctx->pix_fmt = PIX_FMT_YUV420P;
521  break;
522  case 16:
523  if(s->yuy2){
524  avctx->pix_fmt = PIX_FMT_YUYV422;
525  }else{
526  avctx->pix_fmt = PIX_FMT_YUV422P;
527  }
528  break;
529  case 24:
530  case 32:
531  if(s->bgr32){
532  avctx->pix_fmt = PIX_FMT_RGB32;
533  }else{
534  avctx->pix_fmt = PIX_FMT_BGR24;
535  }
536  break;
537  default:
538  return AVERROR_INVALIDDATA;
539  }
540 
541  if (s->predictor == MEDIAN && avctx->pix_fmt == PIX_FMT_YUV422P &&
542  avctx->width % 4) {
543  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 4 "
544  "for this combination of colorspace and predictor type.\n");
545  return AVERROR_INVALIDDATA;
546  }
547 
548  alloc_temp(s);
549 
550 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
551 
552  return 0;
553 }
554 
555 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
556 {
557  HYuvContext *s = avctx->priv_data;
558  int i, ret;
559 
560  avctx->coded_frame= &s->picture;
561  alloc_temp(s);
562 
563  for (i = 0; i < 6; i++)
564  s->vlc[i].table = NULL;
565 
566  if(s->version==2){
567  if ((ret = read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
568  avctx->extradata_size)) < 0)
569  return ret;
570  }else{
571  if ((ret = read_old_huffman_tables(s)) < 0)
572  return ret;
573  }
574 
575  return 0;
576 }
577 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
578 
579 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
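/* Writes a code-length table in the format read_len_table() expects: runs of
 * up to 7 equal lengths are packed as val | (repeat << 5) in one byte, longer
 * runs as a byte with a zero repeat field followed by an 8-bit repeat count. */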
580 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
581  int i;
582  int index= 0;
583 
584  for(i=0; i<256;){
585  int val= len[i];
586  int repeat=0;
587 
588  for(; i<256 && len[i]==val && repeat<255; i++)
589  repeat++;
590 
591  assert(val < 32 && val >0 && repeat<256 && repeat>0);
592  if(repeat>7){
593  buf[index++]= val;
594  buf[index++]= repeat;
595  }else{
596  buf[index++]= val | (repeat<<5);
597  }
598  }
599 
600  return index;
601 }
602 
603 static av_cold int encode_init(AVCodecContext *avctx)
604 {
605  HYuvContext *s = avctx->priv_data;
606  int i, j;
607 
608  common_init(avctx);
609 
610  avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
611  avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
612  s->version=2;
613 
614  avctx->coded_frame= &s->picture;
615 
616  switch(avctx->pix_fmt){
617  case PIX_FMT_YUV420P:
618  s->bitstream_bpp= 12;
619  break;
620  case PIX_FMT_YUV422P:
621  s->bitstream_bpp= 16;
622  break;
623  case PIX_FMT_RGB32:
624  s->bitstream_bpp= 24;
625  break;
626  default:
627  av_log(avctx, AV_LOG_ERROR, "format not supported\n");
628  return -1;
629  }
630  avctx->bits_per_coded_sample= s->bitstream_bpp;
631  s->decorrelate= s->bitstream_bpp >= 24;
632  s->predictor= avctx->prediction_method;
633  s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
634  if(avctx->context_model==1){
635  s->context= avctx->context_model;
636  if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
637  av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
638  return -1;
639  }
640  }else s->context= 0;
641 
642  if(avctx->codec->id==CODEC_ID_HUFFYUV){
643  if(avctx->pix_fmt==PIX_FMT_YUV420P){
644  av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
645  return -1;
646  }
647  if(avctx->context_model){
648  av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
649  return -1;
650  }
651  if(s->interlaced != ( s->height > 288 ))
652  av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
653  }
654 
655  if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
656  av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
657  return -1;
658  }
659 
660  ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
661  ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
662  ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
663  if(s->context)
664  ((uint8_t*)avctx->extradata)[2]|= 0x40;
665  ((uint8_t*)avctx->extradata)[3]= 0;
666  s->avctx->extradata_size= 4;
667 
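/* Two-pass statistics: stats_in, if present, holds the counters written to
 * stats_out by a previous pass (blocks of three lines with 256 decimal values
 * each, one line per plane); they are accumulated on top of an all-ones table.
 * Without stats_in, a generic distribution peaked around small magnitudes
 * (values near 0 and 255) is used instead. */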
668  if(avctx->stats_in){
669  char *p= avctx->stats_in;
670 
671  for(i=0; i<3; i++)
672  for(j=0; j<256; j++)
673  s->stats[i][j]= 1;
674 
675  for(;;){
676  for(i=0; i<3; i++){
677  char *next;
678 
679  for(j=0; j<256; j++){
680  s->stats[i][j]+= strtol(p, &next, 0);
681  if(next==p) return -1;
682  p=next;
683  }
684  }
685  if(p[0]==0 || p[1]==0 || p[2]==0) break;
686  }
687  }else{
688  for(i=0; i<3; i++)
689  for(j=0; j<256; j++){
690  int d= FFMIN(j, 256-j);
691 
692  s->stats[i][j]= 100000000/(d+1);
693  }
694  }
695 
696  for(i=0; i<3; i++){
697  generate_len_table(s->len[i], s->stats[i]);
698 
699  if(generate_bits_table(s->bits[i], s->len[i])<0){
700  return -1;
701  }
702 
703  s->avctx->extradata_size+=
704  store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
705  }
706 
707  if(s->context){
708  for(i=0; i<3; i++){
709  int pels = s->width*s->height / (i?40:10);
710  for(j=0; j<256; j++){
711  int d= FFMIN(j, 256-j);
712  s->stats[i][j]= pels/(d+1);
713  }
714  }
715  }else{
716  for(i=0; i<3; i++)
717  for(j=0; j<256; j++)
718  s->stats[i][j]= 0;
719  }
720 
721 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
722 
723  alloc_temp(s);
724 
725  s->picture_number=0;
726 
727  return 0;
728 }
729 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
730 
731 /* TODO instead of restarting the read when the code isn't in the first level
732  * of the joint table, jump into the 2nd level of the individual table. */
733 #define READ_2PIX(dst0, dst1, plane1){\
734  uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
735  if(code != 0xffff){\
736  dst0 = code>>8;\
737  dst1 = code;\
738  }else{\
739  dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
740  dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
741  }\
742 }
743 
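/* Bitstream readers for 4:2:2 and luma-only data. In the 4:2:2 reader one loop
 * iteration consumes up to four codes of at most 31 bits each (two in the
 * luma-only reader), so when the remaining input might be shorter than that
 * worst case, the slow path rechecks get_bits_left() every iteration;
 * otherwise the unchecked fast loop is used. */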
744 static void decode_422_bitstream(HYuvContext *s, int count){
745  int i;
746 
747  count/=2;
748 
749  if(count >= (get_bits_left(&s->gb))/(31*4)){
750  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
751  READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
752  READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
753  }
754  }else{
755  for(i=0; i<count; i++){
756  READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
757  READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
758  }
759  }
760 }
761 
762 static void decode_gray_bitstream(HYuvContext *s, int count){
763  int i;
764 
765  count/=2;
766 
767  if(count >= (get_bits_left(&s->gb))/(31*2)){
768  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
769  READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
770  }
771  }else{
772  for(i=0; i<count; i++){
773  READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
774  }
775  }
776 }
777 
778 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
779 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
780  int i;
781  const uint8_t *y = s->temp[0] + offset;
782  const uint8_t *u = s->temp[1] + offset/2;
783  const uint8_t *v = s->temp[2] + offset/2;
784 
785  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
786  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
787  return -1;
788  }
789 
790 #define LOAD4\
791  int y0 = y[2*i];\
792  int y1 = y[2*i+1];\
793  int u0 = u[i];\
794  int v0 = v[i];
795 
796  count/=2;
797  if(s->flags&CODEC_FLAG_PASS1){
798  for(i=0; i<count; i++){
799  LOAD4;
800  s->stats[0][y0]++;
801  s->stats[1][u0]++;
802  s->stats[0][y1]++;
803  s->stats[2][v0]++;
804  }
805  }
806  if(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
807  return 0;
808  if(s->context){
809  for(i=0; i<count; i++){
810  LOAD4;
811  s->stats[0][y0]++;
812  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
813  s->stats[1][u0]++;
814  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
815  s->stats[0][y1]++;
816  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
817  s->stats[2][v0]++;
818  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
819  }
820  }else{
821  for(i=0; i<count; i++){
822  LOAD4;
823  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
824  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
825  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
826  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
827  }
828  }
829  return 0;
830 }
831 
832 static int encode_gray_bitstream(HYuvContext *s, int count){
833  int i;
834 
835  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
836  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
837  return -1;
838  }
839 
840 #define LOAD2\
841  int y0 = s->temp[0][2*i];\
842  int y1 = s->temp[0][2*i+1];
843 #define STAT2\
844  s->stats[0][y0]++;\
845  s->stats[0][y1]++;
846 #define WRITE2\
847  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
848  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
849 
850  count/=2;
851  if(s->flags&CODEC_FLAG_PASS1){
852  for(i=0; i<count; i++){
853  LOAD2;
854  STAT2;
855  }
856  }
857  if(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
858  return 0;
859 
860  if(s->context){
861  for(i=0; i<count; i++){
862  LOAD2;
863  STAT2;
864  WRITE2;
865  }
866  }else{
867  for(i=0; i<count; i++){
868  LOAD2;
869  WRITE2;
870  }
871  }
872  return 0;
873 }
874 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
875 
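/* RGB decoding: first try the joint table, which yields a whole precomputed
 * pixel from pix_bgr_map; otherwise read the B, G and R codes individually.
 * In decorrelate mode G is coded directly while B and R are coded as
 * differences from G and have G added back here. The alpha channel, when
 * present, reuses the third (R) code table. */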
876 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
877  int i;
878  for(i=0; i<count; i++){
879  int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
880  if(code != -1){
881  *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
882  }else if(decorrelate){
883  s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
884  s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
885  s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
886  }else{
887  s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
888  s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
889  s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
890  }
891  if(alpha)
892  s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
893  }
894 }
895 
896 static void decode_bgr_bitstream(HYuvContext *s, int count){
897  if(s->decorrelate){
898  if(s->bitstream_bpp==24)
899  decode_bgr_1(s, count, 1, 0);
900  else
901  decode_bgr_1(s, count, 1, 1);
902  }else{
903  if(s->bitstream_bpp==24)
904  decode_bgr_1(s, count, 0, 0);
905  else
906  decode_bgr_1(s, count, 0, 1);
907  }
908 }
909 
910 static int encode_bgr_bitstream(HYuvContext *s, int count){
911  int i;
912 
913  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
914  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
915  return -1;
916  }
917 
918 #define LOAD3\
919  int g= s->temp[0][4*i+G];\
920  int b= (s->temp[0][4*i+B] - g) & 0xff;\
921  int r= (s->temp[0][4*i+R] - g) & 0xff;
922 #define STAT3\
923  s->stats[0][b]++;\
924  s->stats[1][g]++;\
925  s->stats[2][r]++;
926 #define WRITE3\
927  put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
928  put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
929  put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
930 
931  if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
932  for(i=0; i<count; i++){
933  LOAD3;
934  STAT3;
935  }
936  }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
937  for(i=0; i<count; i++){
938  LOAD3;
939  STAT3;
940  WRITE3;
941  }
942  }else{
943  for(i=0; i<count; i++){
944  LOAD3;
945  WRITE3;
946  }
947  }
948  return 0;
949 }
950 
951 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
952 static void draw_slice(HYuvContext *s, int y){
953  int h, cy, i;
954  int offset[AV_NUM_DATA_POINTERS];
955 
956  if(s->avctx->draw_horiz_band==NULL)
957  return;
958 
959  h= y - s->last_slice_end;
960  y -= h;
961 
962  if(s->bitstream_bpp==12){
963  cy= y>>1;
964  }else{
965  cy= y;
966  }
967 
968  offset[0] = s->picture.linesize[0]*y;
969  offset[1] = s->picture.linesize[1]*cy;
970  offset[2] = s->picture.linesize[2]*cy;
971  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
972  offset[i] = 0;
973  emms_c();
974 
975  s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
976 
977  s->last_slice_end= y + h;
978 }
979 
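/* A coded frame is stored as 32-bit words with their bytes reversed, so the
 * input is byte-swapped into bitstream_buffer before bit-reading. In context
 * (adaptive) mode the per-frame Huffman tables precede the image data. The
 * first samples of each plane are stored as raw bytes and seed the predictors
 * (left, plane or median) used for the rest of the frame. */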
980 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
981  const uint8_t *buf = avpkt->data;
982  int buf_size = avpkt->size;
983  HYuvContext *s = avctx->priv_data;
984  const int width= s->width;
985  const int width2= s->width>>1;
986  const int height= s->height;
987  int fake_ystride, fake_ustride, fake_vstride;
988  AVFrame * const p= &s->picture;
989  int table_size = 0, ret;
990 
991  AVFrame *picture = data;
992 
993  av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
994  if (!s->bitstream_buffer)
995  return AVERROR(ENOMEM);
996 
997  memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
998  s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
999 
1000  if(p->data[0])
1001  ff_thread_release_buffer(avctx, p);
1002 
1003  p->reference= 0;
1004  if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
1005  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1006  return ret;
1007  }
1008 
1009  if(s->context){
1010  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1011  if(table_size < 0)
1012  return table_size;
1013  }
1014 
1015  if((unsigned)(buf_size-table_size) >= INT_MAX/8)
1016  return -1;
1017 
1018  if ((ret = init_get_bits(&s->gb, s->bitstream_buffer + table_size,
1019  (buf_size - table_size) * 8)) < 0)
1020  return ret;
1021 
1022  fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
1023  fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
1024  fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
1025 
1026  s->last_slice_end= 0;
1027 
1028  if(s->bitstream_bpp<24){
1029  int y, cy;
1030  int lefty, leftu, leftv;
1031  int lefttopy, lefttopu, lefttopv;
1032 
1033  if(s->yuy2){
1034  p->data[0][3]= get_bits(&s->gb, 8);
1035  p->data[0][2]= get_bits(&s->gb, 8);
1036  p->data[0][1]= get_bits(&s->gb, 8);
1037  p->data[0][0]= get_bits(&s->gb, 8);
1038 
1039  av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1040  return -1;
1041  }else{
1042 
1043  leftv= p->data[2][0]= get_bits(&s->gb, 8);
1044  lefty= p->data[0][1]= get_bits(&s->gb, 8);
1045  leftu= p->data[1][0]= get_bits(&s->gb, 8);
1046  p->data[0][0]= get_bits(&s->gb, 8);
1047 
1048  switch(s->predictor){
1049  case LEFT:
1050  case PLANE:
1051  decode_422_bitstream(s, width-2);
1052  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1053  if(!(s->flags&CODEC_FLAG_GRAY)){
1054  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1055  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1056  }
1057 
1058  for(cy=y=1; y<s->height; y++,cy++){
1059  uint8_t *ydst, *udst, *vdst;
1060 
1061  if(s->bitstream_bpp==12){
1062  decode_gray_bitstream(s, width);
1063 
1064  ydst= p->data[0] + p->linesize[0]*y;
1065 
1066  lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1067  if(s->predictor == PLANE){
1068  if(y>s->interlaced)
1069  s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1070  }
1071  y++;
1072  if(y>=s->height) break;
1073  }
1074 
1075  draw_slice(s, y);
1076 
1077  ydst= p->data[0] + p->linesize[0]*y;
1078  udst= p->data[1] + p->linesize[1]*cy;
1079  vdst= p->data[2] + p->linesize[2]*cy;
1080 
1081  decode_422_bitstream(s, width);
1082  lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1083  if(!(s->flags&CODEC_FLAG_GRAY)){
1084  leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1085  leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1086  }
1087  if(s->predictor == PLANE){
1088  if(cy>s->interlaced){
1089  s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1090  if(!(s->flags&CODEC_FLAG_GRAY)){
1091  s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1092  s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1093  }
1094  }
1095  }
1096  }
1097  draw_slice(s, height);
1098 
1099  break;
1100  case MEDIAN:
1101  /* first line except first 2 pixels is left predicted */
1102  decode_422_bitstream(s, width-2);
1103  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1104  if(!(s->flags&CODEC_FLAG_GRAY)){
1105  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1106  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1107  }
1108 
1109  cy=y=1;
1110 
1111  /* second line is left predicted for interlaced case */
1112  if(s->interlaced){
1113  decode_422_bitstream(s, width);
1114  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1115  if(!(s->flags&CODEC_FLAG_GRAY)){
1116  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1117  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1118  }
1119  y++; cy++;
1120  }
1121 
1122  /* next 4 pixels are left predicted too */
1123  decode_422_bitstream(s, 4);
1124  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1125  if(!(s->flags&CODEC_FLAG_GRAY)){
1126  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1127  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1128  }
1129 
1130  /* next line except the first 4 pixels is median predicted */
1131  lefttopy= p->data[0][3];
1132  decode_422_bitstream(s, width-4);
1133  s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1134  if(!(s->flags&CODEC_FLAG_GRAY)){
1135  lefttopu= p->data[1][1];
1136  lefttopv= p->data[2][1];
1137  s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1138  s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1139  }
1140  y++; cy++;
1141 
1142  for(; y<height; y++,cy++){
1143  uint8_t *ydst, *udst, *vdst;
1144 
1145  if(s->bitstream_bpp==12){
1146  while(2*cy > y){
1147  decode_gray_bitstream(s, width);
1148  ydst= p->data[0] + p->linesize[0]*y;
1149  s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1150  y++;
1151  }
1152  if(y>=height) break;
1153  }
1154  draw_slice(s, y);
1155 
1156  decode_422_bitstream(s, width);
1157 
1158  ydst= p->data[0] + p->linesize[0]*y;
1159  udst= p->data[1] + p->linesize[1]*cy;
1160  vdst= p->data[2] + p->linesize[2]*cy;
1161 
1162  s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1163  if(!(s->flags&CODEC_FLAG_GRAY)){
1164  s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1165  s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1166  }
1167  }
1168 
1169  draw_slice(s, height);
1170  break;
1171  }
1172  }
1173  }else{
1174  int y;
1175  int leftr, leftg, leftb, lefta;
1176  const int last_line= (height-1)*p->linesize[0];
1177 
1178  if(s->bitstream_bpp==32){
1179  lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1180  leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1181  leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1182  leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1183  }else{
1184  leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1185  leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1186  leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1187  lefta= p->data[0][last_line+A]= 255;
1188  skip_bits(&s->gb, 8);
1189  }
1190 
1191  if(s->bgr32){
1192  switch(s->predictor){
1193  case LEFT:
1194  case PLANE:
1195  decode_bgr_bitstream(s, width-1);
1196  s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1197 
1198  for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1199  decode_bgr_bitstream(s, width);
1200 
1201  s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1202  if(s->predictor == PLANE){
1203  if(s->bitstream_bpp!=32) lefta=0;
1204  if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1205  s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1206  p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1207  }
1208  }
1209  }
1210  draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1211  break;
1212  default:
1213  av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1214  }
1215  }else{
1216 
1217  av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1218  return -1;
1219  }
1220  }
1221  emms_c();
1222 
1223  *picture= *p;
1224  *data_size = sizeof(AVFrame);
1225 
1226  return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1227 }
1228 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1229 
1230 static int common_end(HYuvContext *s){
1231  int i;
1232 
1233  for(i=0; i<3; i++){
1234  av_freep(&s->temp[i]);
1235  }
1236  return 0;
1237 }
1238 
1239 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1240 static av_cold int decode_end(AVCodecContext *avctx)
1241 {
1242  HYuvContext *s = avctx->priv_data;
1243  int i;
1244 
1245  if (s->picture.data[0])
1246  avctx->release_buffer(avctx, &s->picture);
1247 
1248  common_end(s);
1249  av_freep(&s->bitstream_buffer);
1250 
1251  for(i=0; i<6; i++){
1252  ff_free_vlc(&s->vlc[i]);
1253  }
1254 
1255  return 0;
1256 }
1257 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1258 
1259 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
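/* In context (adaptive) mode the Huffman tables are rebuilt from the
 * accumulated statistics, stored at the start of every packet, and the
 * counters are then halved so that older frames gradually lose weight. The
 * finished bitstream is byte-swapped in 32-bit words before output, which is
 * the inverse of the bswap_buf() done in decode_frame(). */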
1260 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1261  HYuvContext *s = avctx->priv_data;
1262  AVFrame *pict = data;
1263  const int width= s->width;
1264  const int width2= s->width>>1;
1265  const int height= s->height;
1266  const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1267  const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1268  const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1269  AVFrame * const p= &s->picture;
1270  int i, j, size=0;
1271 
1272  *p = *pict;
1273  p->pict_type= AV_PICTURE_TYPE_I;
1274  p->key_frame= 1;
1275 
1276  if(s->context){
1277  for(i=0; i<3; i++){
1278  generate_len_table(s->len[i], s->stats[i]);
1279  if(generate_bits_table(s->bits[i], s->len[i])<0)
1280  return -1;
1281  size+= store_table(s, s->len[i], &buf[size]);
1282  }
1283 
1284  for(i=0; i<3; i++)
1285  for(j=0; j<256; j++)
1286  s->stats[i][j] >>= 1;
1287  }
1288 
1289  init_put_bits(&s->pb, buf+size, buf_size-size);
1290 
1291  if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1292  int lefty, leftu, leftv, y, cy;
1293 
1294  put_bits(&s->pb, 8, leftv= p->data[2][0]);
1295  put_bits(&s->pb, 8, lefty= p->data[0][1]);
1296  put_bits(&s->pb, 8, leftu= p->data[1][0]);
1297  put_bits(&s->pb, 8, p->data[0][0]);
1298 
1299  lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1300  leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1301  leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1302 
1303  encode_422_bitstream(s, 2, width-2);
1304 
1305  if(s->predictor==MEDIAN){
1306  int lefttopy, lefttopu, lefttopv;
1307  cy=y=1;
1308  if(s->interlaced){
1309  lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1310  leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1311  leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1312 
1313  encode_422_bitstream(s, 0, width);
1314  y++; cy++;
1315  }
1316 
1317  lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1318  leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1319  leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1320 
1321  encode_422_bitstream(s, 0, 4);
1322 
1323  lefttopy= p->data[0][3];
1324  lefttopu= p->data[1][1];
1325  lefttopv= p->data[2][1];
1326  s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1327  s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1328  s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1329  encode_422_bitstream(s, 0, width-4);
1330  y++; cy++;
1331 
1332  for(; y<height; y++,cy++){
1333  uint8_t *ydst, *udst, *vdst;
1334 
1335  if(s->bitstream_bpp==12){
1336  while(2*cy > y){
1337  ydst= p->data[0] + p->linesize[0]*y;
1338  s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1339  encode_gray_bitstream(s, width);
1340  y++;
1341  }
1342  if(y>=height) break;
1343  }
1344  ydst= p->data[0] + p->linesize[0]*y;
1345  udst= p->data[1] + p->linesize[1]*cy;
1346  vdst= p->data[2] + p->linesize[2]*cy;
1347 
1348  s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1349  s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1350  s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1351 
1352  encode_422_bitstream(s, 0, width);
1353  }
1354  }else{
1355  for(cy=y=1; y<height; y++,cy++){
1356  uint8_t *ydst, *udst, *vdst;
1357 
1358  /* encode a luma only line & y++ */
1359  if(s->bitstream_bpp==12){
1360  ydst= p->data[0] + p->linesize[0]*y;
1361 
1362  if(s->predictor == PLANE && s->interlaced < y){
1363  s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1364 
1365  lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1366  }else{
1367  lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1368  }
1369  encode_gray_bitstream(s, width);
1370  y++;
1371  if(y>=height) break;
1372  }
1373 
1374  ydst= p->data[0] + p->linesize[0]*y;
1375  udst= p->data[1] + p->linesize[1]*cy;
1376  vdst= p->data[2] + p->linesize[2]*cy;
1377 
1378  if(s->predictor == PLANE && s->interlaced < cy){
1379  s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1380  s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1381  s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1382 
1383  lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1384  leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1385  leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1386  }else{
1387  lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1388  leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1389  leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1390  }
1391 
1392  encode_422_bitstream(s, 0, width);
1393  }
1394  }
1395  }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1396  uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1397  const int stride = -p->linesize[0];
1398  const int fake_stride = -fake_ystride;
1399  int y;
1400  int leftr, leftg, leftb;
1401 
1402  put_bits(&s->pb, 8, leftr= data[R]);
1403  put_bits(&s->pb, 8, leftg= data[G]);
1404  put_bits(&s->pb, 8, leftb= data[B]);
1405  put_bits(&s->pb, 8, 0);
1406 
1407  sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1408  encode_bgr_bitstream(s, width-1);
1409 
1410  for(y=1; y<s->height; y++){
1411  uint8_t *dst = data + y*stride;
1412  if(s->predictor == PLANE && s->interlaced < y){
1413  s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1414  sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1415  }else{
1416  sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1417  }
1418  encode_bgr_bitstream(s, width);
1419  }
1420  }else{
1421  av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1422  }
1423  emms_c();
1424 
1425  size+= (put_bits_count(&s->pb)+31)/8;
1426  put_bits(&s->pb, 16, 0);
1427  put_bits(&s->pb, 15, 0);
1428  size/= 4;
1429 
1430  if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1431  int j;
1432  char *p= avctx->stats_out;
1433  char *end= p + 1024*30;
1434  for(i=0; i<3; i++){
1435  for(j=0; j<256; j++){
1436  snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1437  p+= strlen(p);
1438  s->stats[i][j]= 0;
1439  }
1440  snprintf(p, end-p, "\n");
1441  p++;
1442  }
1443  } else
1444  avctx->stats_out[0] = '\0';
1445  if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1446  flush_put_bits(&s->pb);
1447  s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1448  }
1449 
1450  s->picture_number++;
1451 
1452  return size*4;
1453 }
1454 
1455 static av_cold int encode_end(AVCodecContext *avctx)
1456 {
1457  HYuvContext *s = avctx->priv_data;
1458 
1459  common_end(s);
1460 
1461  av_freep(&avctx->extradata);
1462  av_freep(&avctx->stats_out);
1463 
1464  return 0;
1465 }
1466 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1467 
1468 #if CONFIG_HUFFYUV_DECODER
1469 AVCodec ff_huffyuv_decoder = {
1470  .name = "huffyuv",
1471  .type = AVMEDIA_TYPE_VIDEO,
1472  .id = CODEC_ID_HUFFYUV,
1473  .priv_data_size = sizeof(HYuvContext),
1474  .init = decode_init,
1475  .close = decode_end,
1476  .decode = decode_frame,
1477  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1478  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1479  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1480 };
1481 #endif
1482 
1483 #if CONFIG_FFVHUFF_DECODER
1484 AVCodec ff_ffvhuff_decoder = {
1485  .name = "ffvhuff",
1486  .type = AVMEDIA_TYPE_VIDEO,
1487  .id = CODEC_ID_FFVHUFF,
1488  .priv_data_size = sizeof(HYuvContext),
1489  .init = decode_init,
1490  .close = decode_end,
1491  .decode = decode_frame,
1492  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1493  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1494  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1495 };
1496 #endif
1497 
1498 #if CONFIG_HUFFYUV_ENCODER
1499 AVCodec ff_huffyuv_encoder = {
1500  .name = "huffyuv",
1501  .type = AVMEDIA_TYPE_VIDEO,
1502  .id = CODEC_ID_HUFFYUV,
1503  .priv_data_size = sizeof(HYuvContext),
1504  .init = encode_init,
1505  .encode = encode_frame,
1506  .close = encode_end,
1507  .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1508  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1509 };
1510 #endif
1511 
1512 #if CONFIG_FFVHUFF_ENCODER
1513 AVCodec ff_ffvhuff_encoder = {
1514  .name = "ffvhuff",
1515  .type = AVMEDIA_TYPE_VIDEO,
1516  .id = CODEC_ID_FFVHUFF,
1517  .priv_data_size = sizeof(HYuvContext),
1518  .init = encode_init,
1519  .encode = encode_frame,
1520  .close = encode_end,
1521  .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1522  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1523 };
1524 #endif