apedec.c
/*
 * Monkey's Audio lossless audio decoder
 * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
 * based upon libdemac from Dave Chapman.
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "internal.h"
#include "dsputil.h"
#include "get_bits.h"
#include "bytestream.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avassert.h"

/**
 * @file
 * Monkey's Audio lossless audio decoder
 */

#define BLOCKS_PER_LOOP     4608
#define MAX_CHANNELS        2
#define MAX_BYTESPERSAMPLE  3

#define APE_FRAMECODE_MONO_SILENCE    1
#define APE_FRAMECODE_STEREO_SILENCE  3
#define APE_FRAMECODE_PSEUDO_STEREO   4

#define HISTORY_SIZE 512
#define PREDICTOR_ORDER 8
/** Total size of all predictor histories */
#define PREDICTOR_SIZE 50

#define YDELAYA (18 + PREDICTOR_ORDER*4)
#define YDELAYB (18 + PREDICTOR_ORDER*3)
#define XDELAYA (18 + PREDICTOR_ORDER*2)
#define XDELAYB (18 + PREDICTOR_ORDER)

#define YADAPTCOEFFSA 18
#define XADAPTCOEFFSA 14
#define YADAPTCOEFFSB 10
#define XADAPTCOEFFSB 5

/**
 * Possible compression levels
 */
enum APECompressionLevel {
    COMPRESSION_LEVEL_FAST       = 1000,
    COMPRESSION_LEVEL_NORMAL     = 2000,
    COMPRESSION_LEVEL_HIGH       = 3000,
    COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
    COMPRESSION_LEVEL_INSANE     = 5000
};

#define APE_FILTER_LEVELS 3

/** Filter orders depending on compression level */
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
    {  0,   0,    0 },
    { 16,   0,    0 },
    { 64,   0,    0 },
    { 32, 256,    0 },
    { 16, 256, 1280 }
};

/** Filter fraction bits depending on compression level */
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
    {  0,  0,  0 },
    { 11,  0,  0 },
    { 11,  0,  0 },
    { 10, 13,  0 },
    { 11, 13, 15 }
};
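
/* The row index into these two tables is the filter set, computed in
 * ape_decode_init() as compression_level / 1000 - 1: the FAST level uses
 * no filters at all, while INSANE cascades filters of order 16, 256 and
 * 1280. */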

/** Filters applied to the decoded data */
typedef struct APEFilter {
    int16_t *coeffs;        ///< actual coefficients used in filtering
    int16_t *adaptcoeffs;   ///< adaptive filter coefficients used for correcting of actual filter coefficients
    int16_t *historybuffer; ///< filter memory
    int16_t *delay;         ///< filtered values

    int avg;
} APEFilter;

typedef struct APERice {
    uint32_t k;
    uint32_t ksum;
} APERice;

typedef struct APERangecoder {
    uint32_t low;           ///< low end of interval
    uint32_t range;         ///< length of interval
    uint32_t help;          ///< bytes_to_follow resp. intermediate value
    unsigned int buffer;    ///< buffer for input/output
} APERangecoder;

/** Filter histories */
typedef struct APEPredictor {
    int32_t *buf;

    int32_t lastA[2];

    int32_t filterA[2];
    int32_t filterB[2];

    int32_t coeffsA[2][4];  ///< adaptation coefficients
    int32_t coeffsB[2][5];  ///< adaptation coefficients
    int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
} APEPredictor;

/** Decoder context */
typedef struct APEContext {
    AVCodecContext *avctx;
    AVFrame frame;
    DSPContext dsp;
    int channels;
    int samples;                             ///< samples left to decode in current frame

    int fileversion;                         ///< codec version, very important in decoding process
    int compression_level;                   ///< compression levels
    int fset;                                ///< which filter set to use (calculated from compression level)
    int flags;                               ///< global decoder flags

    uint32_t CRC;                            ///< frame CRC
    int frameflags;                          ///< frame flags
    APEPredictor predictor;                  ///< predictor used for final reconstruction

    int32_t decoded0[BLOCKS_PER_LOOP];       ///< decoded data for the first channel
    int32_t decoded1[BLOCKS_PER_LOOP];       ///< decoded data for the second channel

    int16_t *filterbuf[APE_FILTER_LEVELS];   ///< filter memory

    APERangecoder rc;                        ///< rangecoder used to decode actual values
    APERice riceX;                           ///< rice code parameters for the second channel
    APERice riceY;                           ///< rice code parameters for the first channel
    APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction

    uint8_t *data;                           ///< current frame data
    uint8_t *data_end;                       ///< frame data end
    const uint8_t *ptr;                      ///< current position in frame data

    int error;
} APEContext;

// TODO: dsputilize

static av_cold int ape_decode_close(AVCodecContext *avctx)
{
    APEContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < APE_FILTER_LEVELS; i++)
        av_freep(&s->filterbuf[i]);

    av_freep(&s->data);
    return 0;
}

static av_cold int ape_decode_init(AVCodecContext *avctx)
{
    APEContext *s = avctx->priv_data;
    int i;

    if (avctx->extradata_size != 6) {
        av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
        return AVERROR(EINVAL);
    }
    if (avctx->bits_per_coded_sample != 16) {
        av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n");
        return AVERROR(EINVAL);
    }
    if (avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
        return AVERROR(EINVAL);
    }
    s->avctx             = avctx;
    s->channels          = avctx->channels;
    s->fileversion       = AV_RL16(avctx->extradata);
    s->compression_level = AV_RL16(avctx->extradata + 2);
    s->flags             = AV_RL16(avctx->extradata + 4);

    av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n",
           s->compression_level, s->flags);
    if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) {
        av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
               s->compression_level);
        return AVERROR_INVALIDDATA;
    }
    s->fset = s->compression_level / 1000 - 1;
    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[s->fset][i])
            break;
        FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
                         (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
                         filter_alloc_fail);
    }

    dsputil_init(&s->dsp, avctx);
    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
                                                   : AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
filter_alloc_fail:
    ape_decode_close(avctx);
    return AVERROR(ENOMEM);
}

/**
 * @name APE range decoding functions
 * @{
 */

#define CODE_BITS    32
#define TOP_VALUE    ((unsigned int)1 << (CODE_BITS-1))
#define SHIFT_BITS   (CODE_BITS - 9)
#define EXTRA_BITS   ((CODE_BITS-2) % 8 + 1)
#define BOTTOM_VALUE (TOP_VALUE >> 8)

/** Start the decoder */
static inline void range_start_decoding(APEContext *ctx)
{
    ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
    ctx->rc.low    = ctx->rc.buffer >> (8 - EXTRA_BITS);
    ctx->rc.range  = (uint32_t) 1 << EXTRA_BITS;
}

/** Perform normalization */
static inline void range_dec_normalize(APEContext *ctx)
{
    while (ctx->rc.range <= BOTTOM_VALUE) {
        ctx->rc.buffer <<= 8;
        if (ctx->ptr < ctx->data_end) {
            ctx->rc.buffer += *ctx->ptr;
            ctx->ptr++;
        } else {
            ctx->error = 1;
        }
        ctx->rc.low   = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
        ctx->rc.range <<= 8;
    }
}

/**
 * Calculate cumulative frequency for next symbol. Does NO update!
 * @param ctx decoder context
 * @param tot_f is the total frequency or (code_value)1<<shift
 * @return the cumulative frequency
 */
static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
{
    range_dec_normalize(ctx);
    ctx->rc.help = ctx->rc.range / tot_f;
    return ctx->rc.low / ctx->rc.help;
}

/**
 * Decode value with given size in bits
 * @param ctx decoder context
 * @param shift number of bits to decode
 */
static inline int range_decode_culshift(APEContext *ctx, int shift)
{
    range_dec_normalize(ctx);
    ctx->rc.help = ctx->rc.range >> shift;
    return ctx->rc.low / ctx->rc.help;
}

/**
 * Update decoding state
 * @param ctx decoder context
 * @param sy_f the interval length (frequency of the symbol)
 * @param lt_f the lower end (frequency sum of < symbols)
 */
static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
{
    ctx->rc.low  -= ctx->rc.help * lt_f;
    ctx->rc.range = ctx->rc.help * sy_f;
}

/** Decode n bits (n <= 16) without modelling */
static inline int range_decode_bits(APEContext *ctx, int n)
{
    int sym = range_decode_culshift(ctx, n);
    range_decode_update(ctx, 1, sym);
    return sym;
}

#define MODEL_ELEMENTS 64

/**
 * Fixed probabilities for symbols in Monkey Audio version 3.97
 */
static const uint16_t counts_3970[22] = {
        0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
    62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
    65450, 65469, 65480, 65487, 65491, 65493,
};

/**
 * Probability ranges for symbols in Monkey Audio version 3.97
 */
static const uint16_t counts_diff_3970[21] = {
    14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
    1104, 677, 415, 248, 150, 89, 54, 31,
    19, 11, 7, 4, 2,
};

/**
 * Fixed probabilities for symbols in Monkey Audio version 3.98
 */
static const uint16_t counts_3980[22] = {
        0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
    64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
    65485, 65488, 65490, 65491, 65492, 65493,
};

/**
 * Probability ranges for symbols in Monkey Audio version 3.98
 */
static const uint16_t counts_diff_3980[21] = {
    19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
    261, 119, 65, 31, 19, 10, 6, 3,
    3, 2, 1, 1, 1,
};

/**
 * Decode symbol
 * @param ctx decoder context
 * @param counts probability range start position
 * @param counts_diff probability range widths
 */
static inline int range_get_symbol(APEContext *ctx,
                                   const uint16_t counts[],
                                   const uint16_t counts_diff[])
{
    int symbol, cf;

    cf = range_decode_culshift(ctx, 16);

    if (cf > 65492) {
        symbol = cf - 65535 + 63;
        range_decode_update(ctx, 1, cf);
        if (cf > 65535)
            ctx->error = 1;
        return symbol;
    }
    /* figure out the symbol inefficiently; a binary search would be much better */
    for (symbol = 0; counts[symbol + 1] <= cf; symbol++);

    range_decode_update(ctx, counts_diff[symbol], counts[symbol]);

    return symbol;
}
/** @} */ // group rangecoder

static inline void update_rice(APERice *rice, int x)
{
    int lim = rice->k ? (1 << (rice->k + 4)) : 0;
    rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);

    if (rice->ksum < lim)
        rice->k--;
    else if (rice->ksum >= (1 << (rice->k + 5)))
        rice->k++;
}

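/* ape_decode_value() reads one unsigned residual with the range decoder.
 * Files older than version 3.990 use a Rice-style split: a modelled
 * "overflow" symbol supplies the high bits and roughly rice->k raw bits
 * supply the remainder.  Newer files decode an overflow symbol plus a
 * base value below a pivot derived from the running rice->ksum average.
 * The result is finally folded back into a signed value. */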
static inline int ape_decode_value(APEContext *ctx, APERice *rice)
{
    int x, overflow;

    if (ctx->fileversion < 3990) {
        int tmpk;

        overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            tmpk = range_decode_bits(ctx, 5);
            overflow = 0;
        } else
            tmpk = (rice->k < 1) ? 0 : rice->k - 1;

        if (tmpk <= 16)
            x = range_decode_bits(ctx, tmpk);
        else if (tmpk <= 32) {
            x = range_decode_bits(ctx, 16);
            x |= (range_decode_bits(ctx, tmpk - 16) << 16);
        } else {
            av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
            return AVERROR_INVALIDDATA;
        }
        x += overflow << tmpk;
    } else {
        int base, pivot;

        pivot = rice->ksum >> 5;
        if (pivot == 0)
            pivot = 1;

        overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            overflow  = range_decode_bits(ctx, 16) << 16;
            overflow |= range_decode_bits(ctx, 16);
        }

        if (pivot < 0x10000) {
            base = range_decode_culfreq(ctx, pivot);
            range_decode_update(ctx, 1, base);
        } else {
            int base_hi = pivot, base_lo;
            int bbits = 0;

            while (base_hi & ~0xFFFF) {
                base_hi >>= 1;
                bbits++;
            }
            base_hi = range_decode_culfreq(ctx, base_hi + 1);
            range_decode_update(ctx, 1, base_hi);
            base_lo = range_decode_culfreq(ctx, 1 << bbits);
            range_decode_update(ctx, 1, base_lo);

            base = (base_hi << bbits) + base_lo;
        }

        x = base + overflow * pivot;
    }

    update_rice(rice, x);

    /* Convert to signed */
    if (x & 1)
        return (x >> 1) + 1;
    else
        return -(x >> 1);
}

static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo)
{
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* We are pure silence, just memset the output buffer. */
        memset(decoded0, 0, blockstodecode * sizeof(int32_t));
        memset(decoded1, 0, blockstodecode * sizeof(int32_t));
    } else {
        while (blockstodecode--) {
            *decoded0++ = ape_decode_value(ctx, &ctx->riceY);
            if (stereo)
                *decoded1++ = ape_decode_value(ctx, &ctx->riceX);
        }
    }
}

static int init_entropy_decoder(APEContext *ctx)
{
    /* Read the CRC */
    if (ctx->data_end - ctx->ptr < 6)
        return AVERROR_INVALIDDATA;
    ctx->CRC = bytestream_get_be32(&ctx->ptr);

    /* Read the frame flags if they exist */
    ctx->frameflags = 0;
    if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
        ctx->CRC &= ~0x80000000;

        if (ctx->data_end - ctx->ptr < 6)
            return AVERROR_INVALIDDATA;
        ctx->frameflags = bytestream_get_be32(&ctx->ptr);
    }

    /* Initialize the rice structs */
    ctx->riceX.k    = 10;
    ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
    ctx->riceY.k    = 10;
    ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;

    /* The first 8 bits of input are ignored. */
    ctx->ptr++;

    range_start_decoding(ctx);

    return 0;
}

static const int32_t initial_coeffs[4] = {
    360, 317, -109, 98
};

static void init_predictor_decoder(APEContext *ctx)
{
    APEPredictor *p = &ctx->predictor;

    /* Zero the history buffers */
    memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t));
    p->buf = p->historybuffer;

    /* Initialize and zero the coefficients */
    memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs));
    memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs));
    memset(p->coeffsB, 0, sizeof(p->coeffsB));

    p->filterA[0] = p->filterA[1] = 0;
    p->filterB[0] = p->filterB[1] = 0;
    p->lastA[0]   = p->lastA[1]   = 0;
}

/** Get inverse sign of integer (-1 for positive, 1 for negative) */
static inline int APESIGN(int32_t x) {
    return (x < 0) - (x > 0);
}

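/* predictor_update_filter() runs one step of the adaptive stereo
 * predictor: a 4-tap prediction from the channel's own history (the "A"
 * coefficients) is combined with a 5-tap prediction from the other
 * channel's filtered output (the "B" coefficients), and both coefficient
 * sets are nudged by the sign of the decoded residual. */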
static av_always_inline int predictor_update_filter(APEPredictor *p,
                                                    const int decoded, const int filter,
                                                    const int delayA, const int delayB,
                                                    const int adaptA, const int adaptB)
{
    int32_t predictionA, predictionB, sign;

    p->buf[delayA]     = p->lastA[filter];
    p->buf[adaptA]     = APESIGN(p->buf[delayA]);
    p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];
    p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);

    predictionA = p->buf[delayA    ] * p->coeffsA[filter][0] +
                  p->buf[delayA - 1] * p->coeffsA[filter][1] +
                  p->buf[delayA - 2] * p->coeffsA[filter][2] +
                  p->buf[delayA - 3] * p->coeffsA[filter][3];

    /* Apply a scaled first-order filter compression */
    p->buf[delayB]     = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
    p->buf[adaptB]     = APESIGN(p->buf[delayB]);
    p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
    p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
    p->filterB[filter] = p->filterA[filter ^ 1];

    predictionB = p->buf[delayB    ] * p->coeffsB[filter][0] +
                  p->buf[delayB - 1] * p->coeffsB[filter][1] +
                  p->buf[delayB - 2] * p->coeffsB[filter][2] +
                  p->buf[delayB - 3] * p->coeffsB[filter][3] +
                  p->buf[delayB - 4] * p->coeffsB[filter][4];

    p->lastA[filter]   = decoded + ((predictionA + (predictionB >> 1)) >> 10);
    p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);

    sign = APESIGN(decoded);
    p->coeffsA[filter][0] += p->buf[adaptA    ] * sign;
    p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
    p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
    p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
    p->coeffsB[filter][0] += p->buf[adaptB    ] * sign;
    p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
    p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
    p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
    p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;

    return p->filterA[filter];
}

static void predictor_decode_stereo(APEContext *ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    while (count--) {
        /* Predictor Y */
        *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
                                            YADAPTCOEFFSA, YADAPTCOEFFSB);
        decoded0++;
        *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
                                            XADAPTCOEFFSA, XADAPTCOEFFSB);
        decoded1++;

        /* Combined */
        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf,
                    PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }
    }
}

static void predictor_decode_mono(APEContext *ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t predictionA, currentA, A, sign;

    currentA = p->lastA[0];

    while (count--) {
        A = *decoded0;

        p->buf[YDELAYA]     = currentA;
        p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];

        predictionA = p->buf[YDELAYA    ] * p->coeffsA[0][0] +
                      p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
                      p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
                      p->buf[YDELAYA - 3] * p->coeffsA[0][3];

        currentA = A + (predictionA >> 10);

        p->buf[YADAPTCOEFFSA]     = APESIGN(p->buf[YDELAYA    ]);
        p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);

        sign = APESIGN(A);
        p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA    ] * sign;
        p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
        p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
        p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;

        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf,
                    PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }

        p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
        *(decoded0++) = p->filterA[0];
    }

    p->lastA[0] = currentA;
}

static void do_init_filter(APEFilter *f, int16_t *buf, int order)
{
    f->coeffs        = buf;
    f->historybuffer = buf + order;
    f->delay         = f->historybuffer + order * 2;
    f->adaptcoeffs   = f->historybuffer + order;

    memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t));
    memset(f->coeffs, 0, order * sizeof(int16_t));
    f->avg = 0;
}

static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
{
    do_init_filter(&f[0], buf, order);
    do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
}
static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
                            int32_t *data, int count, int order, int fracbits)
{
    int res;
    int absres;

    while (count--) {
        /* round fixedpoint scalar product */
        res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order,
                                                    f->adaptcoeffs - order,
                                                    order, APESIGN(*data));
        res = (res + (1 << (fracbits - 1))) >> fracbits;
        res += *data;
        *data++ = res;

        /* Update the output history */
        *f->delay++ = av_clip_int16(res);

        if (version < 3980) {
            /* Version ??? to < 3.98 files (untested) */
            f->adaptcoeffs[0]  = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
            f->adaptcoeffs[-4] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        } else {
            /* Version 3.98 and later files */

            /* Update the adaptation coefficients */
            absres = FFABS(res);
            if (absres)
                *f->adaptcoeffs = ((res & (-1 << 31)) ^ (-1 << 30)) >>
                                  (25 + (absres <= f->avg * 3) + (absres <= f->avg * 4 / 3));
            else
                *f->adaptcoeffs = 0;

            f->avg += (absres - f->avg) / 16;

            f->adaptcoeffs[-1] >>= 1;
            f->adaptcoeffs[-2] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        }

        f->adaptcoeffs++;

        /* Have we filled the history buffer? */
        if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
            memmove(f->historybuffer, f->delay - (order * 2),
                    (order * 2) * sizeof(int16_t));
            f->delay = f->historybuffer + order * 2;
            f->adaptcoeffs = f->historybuffer + order;
        }
    }
}

static void apply_filter(APEContext *ctx, APEFilter *f,
                         int32_t *data0, int32_t *data1,
                         int count, int order, int fracbits)
{
    do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
    if (data1)
        do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
}

static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
                              int32_t *decoded1, int count)
{
    int i;

    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[ctx->fset][i])
            break;
        apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
                     ape_filter_orders[ctx->fset][i],
                     ape_filter_fracbits[ctx->fset][i]);
    }
}

static int init_frame_decoder(APEContext *ctx)
{
    int i, ret;
    if ((ret = init_entropy_decoder(ctx)) < 0)
        return ret;
    init_predictor_decoder(ctx);

    for (i = 0; i < APE_FILTER_LEVELS; i++) {
        if (!ape_filter_orders[ctx->fset][i])
            break;
        init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
                    ape_filter_orders[ctx->fset][i]);
    }
    return 0;
}

static void ape_unpack_mono(APEContext *ctx, int count)
{
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        entropy_decode(ctx, count, 0);
        /* We are pure silence, so we're done. */
        av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
        return;
    }

    entropy_decode(ctx, count, 0);
    ape_apply_filters(ctx, decoded0, NULL, count);

    /* Now apply the predictor decoding */
    predictor_decode_mono(ctx, count);

    /* Pseudo-stereo - just copy left channel to right channel */
    if (ctx->channels == 2) {
        memcpy(decoded1, decoded0, count * sizeof(*decoded1));
    }
}

static void ape_unpack_stereo(APEContext *ctx, int count)
{
    int32_t left, right;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* We are pure silence, so we're done. */
        av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
        return;
    }

    entropy_decode(ctx, count, 1);
    ape_apply_filters(ctx, decoded0, decoded1, count);

    /* Now apply the predictor decoding */
    predictor_decode_stereo(ctx, count);

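    /* decoded0 carries the channel difference and decoded1 roughly the
     * average of the two channels; rebuild the two outputs from them. */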
    /* Decorrelate and scale to output depth */
    while (count--) {
        left  = *decoded1 - (*decoded0 / 2);
        right = left + *decoded0;

        *(decoded0++) = left;
        *(decoded1++) = right;
    }
}

static int ape_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    APEContext *s = avctx->priv_data;
    int16_t *samples;
    int i, ret;
    int blockstodecode;

    /* this should never be negative, but bad things will happen if it is, so
       check it just to make sure. */
    av_assert0(s->samples >= 0);

    if (!s->samples) {
        uint32_t nblocks, offset;
        void *tmp_data;

        if (!buf_size) {
            *got_frame_ptr = 0;
            return 0;
        }
        if (buf_size < 8) {
            av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
            return AVERROR_INVALIDDATA;
        }

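        /* The payload is stored as 32-bit words: byteswap it into s->data,
         * then read the 32-bit block count and a small (0-3 byte) offset
         * that is skipped before the range decoder starts. */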
        tmp_data = av_realloc(s->data, FFALIGN(buf_size, 4));
        if (!tmp_data)
            return AVERROR(ENOMEM);
        s->data = tmp_data;
        s->dsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf,
                         buf_size >> 2);
        s->ptr      = s->data;
        s->data_end = s->data + buf_size;

        nblocks = bytestream_get_be32(&s->ptr);
        offset  = bytestream_get_be32(&s->ptr);
        if (offset > 3) {
            av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
            s->data = NULL;
            return AVERROR_INVALIDDATA;
        }
        if (s->data_end - s->ptr < offset) {
            av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
            return AVERROR_INVALIDDATA;
        }
        s->ptr += offset;

        if (!nblocks || nblocks > INT_MAX) {
            av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
            return AVERROR_INVALIDDATA;
        }
        s->samples = nblocks;

        memset(s->decoded0, 0, sizeof(s->decoded0));
        memset(s->decoded1, 0, sizeof(s->decoded1));

        /* Initialize the frame decoder */
        if (init_frame_decoder(s) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
            return AVERROR_INVALIDDATA;
        }

    }

    if (!s->data) {
        *got_frame_ptr = 0;
        return buf_size;
    }

    blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples);

    /* get output buffer */
    s->frame.nb_samples = blockstodecode;
    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    samples = (int16_t *) s->frame.data[0];

    s->error = 0;

    if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
        ape_unpack_mono(s, blockstodecode);
    else
        ape_unpack_stereo(s, blockstodecode);
    emms_c();

    if (s->error) {
        s->samples = 0;
        av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < blockstodecode; i++) {
        *samples++ = s->decoded0[i];
        if (s->channels == 2)
            *samples++ = s->decoded1[i];
    }

    s->samples -= blockstodecode;

    *got_frame_ptr     = 1;
    *(AVFrame *) data  = s->frame;

    return (s->samples == 0) ? buf_size : 0;
}

static void ape_flush(AVCodecContext *avctx)
{
    APEContext *s = avctx->priv_data;
    s->samples = 0;
}

AVCodec ff_ape_decoder = {
    .name           = "ape",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = CODEC_ID_APE,
    .priv_data_size = sizeof(APEContext),
    .init           = ape_decode_init,
    .close          = ape_decode_close,
    .decode         = ape_decode_frame,
    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
    .flush          = ape_flush,
    .long_name      = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
};