wmavoice.c
1 /*
2  * Windows Media Audio Voice decoder.
3  * Copyright (c) 2009 Ronald S. Bultje
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include <math.h>
31 #include "avcodec.h"
32 #include "internal.h"
33 #include "get_bits.h"
34 #include "put_bits.h"
35 #include "wmavoice_data.h"
36 #include "celp_math.h"
37 #include "celp_filters.h"
38 #include "acelp_vectors.h"
39 #include "acelp_filters.h"
40 #include "lsp.h"
41 #include "libavutil/lzo.h"
42 #include "dct.h"
43 #include "rdft.h"
44 #include "sinewin.h"
45 
46 #define MAX_BLOCKS 8
47 #define MAX_LSPS 16
48 #define MAX_LSPS_ALIGN16 16
49 
50 #define MAX_FRAMES 3
51 #define MAX_FRAMESIZE 160
52 #define MAX_SIGNAL_HISTORY 416
53 #define MAX_SFRAMESIZE (MAX_FRAMESIZE * MAX_FRAMES)
54 
55 #define SFRAME_CACHE_MAXSIZE 256
56 
57 #define VLC_NBITS 6
58 
59 
60 /**
61  * Frame type VLC coding.
62  */
63 static VLC frame_type_vlc;
67 enum {
68  ACB_TYPE_NONE       = 0, ///< no adaptive codebook (only hardcoded fixed)
70  ACB_TYPE_ASYMMETRIC = 1, ///< adaptive codebook with per-frame pitch, which
71                           ///< we interpolate to get a per-sample pitch;
72                           ///< the signal is generated using an asymmetric
73                           ///< sinc window function
75  ACB_TYPE_HAMMING    = 2  ///< per-block pitch with signal generation using
76                           ///< a Hamming sinc window function
77 };
78 
82 enum {
83  FCB_TYPE_SILENCE    = 0, ///< comfort noise during silence, generated from
84                           ///< a hardcoded (fixed) codebook
85  FCB_TYPE_HARDCODED  = 1, ///< hardcoded (fixed) codebook with per-block gain
87  FCB_TYPE_AW_PULSES  = 2, ///< pitch-adaptive window (AW) pulse signals
89  FCB_TYPE_EXC_PULSES = 3  ///< innovation (fixed) codebook pulse sets in
91                           ///< combinations of either single pulses or
92                           ///< pulse pairs
93 };
94 
98 static const struct frame_type_desc {
99  uint8_t n_blocks;
100 
101  uint8_t log_n_blocks;
102  uint8_t acb_type;
103  uint8_t fcb_type;
104  uint8_t dbl_pulses;
105 
106 
107  uint16_t frame_size;
108 
109 } frame_descs[17] = {
110  { 1, 0, ACB_TYPE_NONE, FCB_TYPE_SILENCE, 0, 0 },
111  { 2, 1, ACB_TYPE_NONE, FCB_TYPE_HARDCODED, 0, 28 },
112  { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_AW_PULSES, 0, 46 },
113  { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2, 80 },
114  { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 104 },
115  { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 0, 108 },
116  { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2, 132 },
117  { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 168 },
118  { 2, 1, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 0, 64 },
119  { 2, 1, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 2, 80 },
120  { 2, 1, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 5, 104 },
121  { 4, 2, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 0, 108 },
122  { 4, 2, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 2, 132 },
123  { 4, 2, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 5, 168 },
124  { 8, 3, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 0, 176 },
125  { 8, 3, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 2, 208 },
126  { 8, 3, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 5, 256 }
127 };
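/* Illustrative note, added for clarity (not part of the original source):
 * how synth_frame() below consumes a frame_descs[] entry, using
 * { 4, 2, ACB_TYPE_HAMMING, FCB_TYPE_EXC_PULSES, 2, 132 } as an example:
 *
 *     int n_blocks       = 4;                        // blocks per 160-sample frame
 *     int block_nsamples = MAX_FRAMESIZE / n_blocks; // 160 / 4 = 40 samples/block
 *     int log_n_blocks   = 2;                        // log2(4), used for shifts
 *     // acb_type/fcb_type select the adaptive/fixed codebook handling,
 *     // dbl_pulses == 2 means 2 of the 5 innovation pulses are coded as pairs,
 *     // frame_size == 132 is the nominal bit budget of this frame type.
 */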
128 
132 typedef struct {
133  /**
134   * @name Global values, specified in the stream header / extradata
135   * @{
136   */
137  AVFrame frame;
138  GetBitContext gb;        ///< packet bitreader; holds the extradata during
139                           ///< decoder init and the packet data during decoding
141 
142  int8_t vbm_tree[25];
143 
144  int spillover_bitsize;
145 
146 
147  int history_nsamples;
148 
149 
150  /* postfilter specific values */
151  int do_apf;
152 
153  int denoise_strength;
154 
155  int denoise_tilt_corr;
156 
157  int dc_level;
158 
159 
160  int lsps;
161  int lsp_q_mode;
162  int lsp_def_mode;
163 
164  int frame_lsp_bitsize;
165 
166  int sframe_lsp_bitsize;
167 
168 
169  int min_pitch_val;
170  int max_pitch_val;
171  int pitch_nbits;
172 
173  int block_pitch_nbits;
174 
175  int block_pitch_range;
176  int block_delta_pitch_nbits;
177 
178 
179 
180  int block_delta_pitch_hrange;
181 
182  uint16_t block_conv_table[4];
183 
184 
194  int spillover_nbits;
195 
196 
197 
198  int has_residual_lsps;
199 
200 
201 
202 
203  int skip_bits_next;
204 
205 
206 
207  uint8_t sframe_cache[SFRAME_CACHE_MAXSIZE + FF_INPUT_BUFFER_PADDING_SIZE];
210  int sframe_cache_size;
211 
212 
213 
214 
215  PutBitContext pb;
216 
226  double prev_lsps[MAX_LSPS];
227 
228  int last_pitch_val;
229  int last_acb_type;
230  int pitch_diff_sh16;
231 
232  float silence_gain;
233 
234  int aw_idx_is_ext;
235 
236  int aw_pulse_range;
237 
238 
239 
240 
241 
242  int aw_n_pulses[2];
243 
244 
245  int aw_first_pulse_off[2];
246 
247  int aw_next_pulse_off_cache;
248 
249 
250 
251 
252 
253  int frame_cntr;
254 
255  float gain_pred_err[6];
256  float excitation_history[MAX_SIGNAL_HISTORY];
260  float synth_history[MAX_LSPS];
261 
270  RDFTContext rdft, irdft;
271 
272  DCTContext dct, dst;
273 
274  float sin[511], cos[511];
275 
276  float postfilter_agc;
277 
278  float dcf_mem[2];
279  float zero_exc_pf[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE];
282  float denoise_filter_cache[MAX_FRAMESIZE];
283  int denoise_filter_cache_size;
284  DECLARE_ALIGNED(32, float, tilted_lpcs_pf)[0x80];
286  DECLARE_ALIGNED(32, float, denoise_coeffs_pf)[0x80];
288  DECLARE_ALIGNED(32, float, synth_filter_out_buf)[0x80 + MAX_LSPS_ALIGN16];
291 
292 } WMAVoiceContext;
295 
305 static av_cold int decode_vbmtree(GetBitContext *gb, int8_t vbm_tree[25])
306 {
307  static const uint8_t bits[] = {
308  2, 2, 2, 4, 4, 4,
309  6, 6, 6, 8, 8, 8,
310  10, 10, 10, 12, 12, 12,
311  14, 14, 14, 14
312  };
313  static const uint16_t codes[] = {
314  0x0000, 0x0001, 0x0002, // 00/01/10
315  0x000c, 0x000d, 0x000e, // 11+00/01/10
316  0x003c, 0x003d, 0x003e, // 1111+00/01/10
317  0x00fc, 0x00fd, 0x00fe, // 111111+00/01/10
318  0x03fc, 0x03fd, 0x03fe, // 11111111+00/01/10
319  0x0ffc, 0x0ffd, 0x0ffe, // 1111111111+00/01/10
320  0x3ffc, 0x3ffd, 0x3ffe, 0x3fff // 111111111111+xx
321  };
322  int cntr[8], n, res;
323 
324  memset(vbm_tree, 0xff, sizeof(vbm_tree[0]) * 25);
325  memset(cntr, 0, sizeof(cntr));
326  for (n = 0; n < 17; n++) {
327  res = get_bits(gb, 3);
328  if (cntr[res] > 3) // should be >= 3 + (res == 7))
329  return -1;
330  vbm_tree[res * 3 + cntr[res]++] = n;
331  }
332  INIT_VLC_STATIC(&frame_type_vlc, VLC_NBITS, sizeof(bits),
333  bits, 1, 1, codes, 2, 2, 132);
334  return 0;
335 }
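/* Illustrative note, added for clarity (not part of the original source):
 * frame_type_vlc maps a bit pattern to an index 0..21 (three slots per code
 * length, see bits[]/codes[] above), and vbm_tree[] maps that index back to
 * one of the 17 frame types. If, say, the extradata assigns res == 1 to frame
 * types 3, 7 and 12 (hypothetical values), the loop above leaves:
 *
 *     vbm_tree[1 * 3 + 0] == 3;   // bit pattern "1100" (codes[3])
 *     vbm_tree[1 * 3 + 1] == 7;   // bit pattern "1101" (codes[4])
 *     vbm_tree[1 * 3 + 2] == 12;  // bit pattern "1110" (codes[5])
 *
 * and synth_frame() later recovers the frame type as
 * s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)].
 */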
336 
337 /**
338  * Set up decoder with parameters from demuxer (extradata etc.).
339  */
340 static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
341 {
342  int n, flags, pitch_range, lsp16_flag;
343  WMAVoiceContext *s = ctx->priv_data;
344 
353  if (ctx->extradata_size != 46) {
354  av_log(ctx, AV_LOG_ERROR,
355  "Invalid extradata size %d (should be 46)\n",
356  ctx->extradata_size);
357  return -1;
358  }
359  flags = AV_RL32(ctx->extradata + 18);
360  s->spillover_bitsize = 3 + av_ceil_log2(ctx->block_align);
361  s->do_apf = flags & 0x1;
362  if (s->do_apf) {
363  ff_rdft_init(&s->rdft, 7, DFT_R2C);
364  ff_rdft_init(&s->irdft, 7, IDFT_C2R);
365  ff_dct_init(&s->dct, 6, DCT_I);
366  ff_dct_init(&s->dst, 6, DST_I);
367 
368  ff_sine_window_init(s->cos, 256);
369  memcpy(&s->sin[255], s->cos, 256 * sizeof(s->cos[0]));
370  for (n = 0; n < 255; n++) {
371  s->sin[n] = -s->sin[510 - n];
372  s->cos[510 - n] = s->cos[n];
373  }
374  }
375  s->denoise_strength = (flags >> 2) & 0xF;
376  if (s->denoise_strength >= 12) {
377  av_log(ctx, AV_LOG_ERROR,
378  "Invalid denoise filter strength %d (max=11)\n",
379  s->denoise_strength);
380  return -1;
381  }
382  s->denoise_tilt_corr = !!(flags & 0x40);
383  s->dc_level = (flags >> 7) & 0xF;
384  s->lsp_q_mode = !!(flags & 0x2000);
385  s->lsp_def_mode = !!(flags & 0x4000);
386  lsp16_flag = flags & 0x1000;
387  if (lsp16_flag) {
388  s->lsps = 16;
389  s->frame_lsp_bitsize = 34;
390  s->sframe_lsp_bitsize = 60;
391  } else {
392  s->lsps = 10;
393  s->frame_lsp_bitsize = 24;
394  s->sframe_lsp_bitsize = 48;
395  }
396  for (n = 0; n < s->lsps; n++)
397  s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);
398 
399  init_get_bits(&s->gb, ctx->extradata + 22, (ctx->extradata_size - 22) << 3);
400  if (decode_vbmtree(&s->gb, s->vbm_tree) < 0) {
401  av_log(ctx, AV_LOG_ERROR, "Invalid VBM tree; broken extradata?\n");
402  return -1;
403  }
404 
405  s->min_pitch_val = ((ctx->sample_rate << 8) / 400 + 50) >> 8;
406  s->max_pitch_val = ((ctx->sample_rate << 8) * 37 / 2000 + 50) >> 8;
407  pitch_range = s->max_pitch_val - s->min_pitch_val;
408  if (pitch_range <= 0) {
409  av_log(ctx, AV_LOG_ERROR, "Invalid pitch range; broken extradata?\n");
410  return -1;
411  }
412  s->pitch_nbits = av_ceil_log2(pitch_range);
413  s->last_pitch_val = 40;
415  s->history_nsamples = s->max_pitch_val + 8;
416 
417  if (s->history_nsamples > MAX_SIGNAL_HISTORY) {
418  int min_sr = ((((1 << 8) - 50) * 400) + 0xFF) >> 8,
419  max_sr = ((((MAX_SIGNAL_HISTORY - 8) << 8) + 205) * 2000 / 37) >> 8;
420 
421  av_log(ctx, AV_LOG_ERROR,
422  "Unsupported samplerate %d (min=%d, max=%d)\n",
423  ctx->sample_rate, min_sr, max_sr); // 322-22097 Hz
424 
425  return -1;
426  }
427 
428  s->block_conv_table[0] = s->min_pitch_val;
429  s->block_conv_table[1] = (pitch_range * 25) >> 6;
430  s->block_conv_table[2] = (pitch_range * 44) >> 6;
431  s->block_conv_table[3] = s->max_pitch_val - 1;
432  s->block_delta_pitch_hrange = (pitch_range >> 3) & ~0xF;
433  if (s->block_delta_pitch_hrange <= 0) {
434  av_log(ctx, AV_LOG_ERROR, "Invalid delta pitch hrange; broken extradata?\n");
435  return -1;
436  }
437  s->block_delta_pitch_nbits = 1 + av_ceil_log2(s->block_delta_pitch_hrange);
438  s->block_pitch_range = s->block_conv_table[2] +
439  s->block_conv_table[3] + 1 +
440  2 * (s->block_conv_table[1] - 2 * s->min_pitch_val);
441  s->block_pitch_nbits = av_ceil_log2(s->block_pitch_range);
442 
443  ctx->sample_fmt = AV_SAMPLE_FMT_FLT;
444 
445  avcodec_get_frame_defaults(&s->frame);
446  ctx->coded_frame = &s->frame;
447 
448  return 0;
449 }
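/* Illustrative note, added for clarity (not part of the original source):
 * for the common 8000 Hz case the pitch bounds derived above work out to
 *
 *     min_pitch_val = ((8000 << 8) / 400       + 50) >> 8;  // = 20 samples (~400 Hz)
 *     max_pitch_val = ((8000 << 8) * 37 / 2000 + 50) >> 8;  // = 148 samples (~54 Hz)
 *     pitch_range   = 148 - 20;                             // = 128
 *     pitch_nbits   = av_ceil_log2(128);                    // = 7 bits per pitch value
 *
 * so a per-frame pitch costs 7 bits and the excitation history needs
 * max_pitch_val + 8 = 156 samples at this sample rate.
 */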
450 
472 static void adaptive_gain_control(float *out, const float *in,
473  const float *speech_synth,
474  int size, float alpha, float *gain_mem)
475 {
476  int i;
477  float speech_energy = 0.0, postfilter_energy = 0.0, gain_scale_factor;
478  float mem = *gain_mem;
479 
480  for (i = 0; i < size; i++) {
481  speech_energy += fabsf(speech_synth[i]);
482  postfilter_energy += fabsf(in[i]);
483  }
484  gain_scale_factor = (1.0 - alpha) * speech_energy / postfilter_energy;
485 
486  for (i = 0; i < size; i++) {
487  mem = alpha * mem + gain_scale_factor;
488  out[i] = in[i] * mem;
489  }
490 
491  *gain_mem = mem;
492 }
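/* Illustrative note, added for clarity (not part of the original source):
 * the loop above adds gain_scale_factor = (1 - alpha) * E(synth) / E(postfilter)
 * to an exponentially decaying state each sample, so the state converges to
 * the plain energy ratio E(synth) / E(postfilter). A rough standalone sketch
 * of the same recursion (agc_step is a hypothetical helper, not used here):
 *
 *     static float agc_step(float energy_ratio, float *mem, float alpha)
 *     {
 *         *mem = alpha * *mem + (1.0f - alpha) * energy_ratio; // one-pole smoother
 *         return *mem;                                         // smoothed gain
 *     }
 *
 * With alpha = 0.99 (the value postfilter() passes in) the gain adapts with a
 * time constant of roughly 100 samples.
 */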
493 
512 static int kalman_smoothen(WMAVoiceContext *s, int pitch,
513  const float *in, float *out, int size)
514 {
515  int n;
516  float optimal_gain = 0, dot;
517  const float *ptr = &in[-FFMAX(s->min_pitch_val, pitch - 3)],
518  *end = &in[-FFMIN(s->max_pitch_val, pitch + 3)],
519  *best_hist_ptr;
520 
521  /* find best fitting point in history */
522  do {
523  dot = ff_dot_productf(in, ptr, size);
524  if (dot > optimal_gain) {
525  optimal_gain = dot;
526  best_hist_ptr = ptr;
527  }
528  } while (--ptr >= end);
529 
530  if (optimal_gain <= 0)
531  return -1;
532  dot = ff_dot_productf(best_hist_ptr, best_hist_ptr, size);
533  if (dot <= 0) // would be 1.0
534  return -1;
535 
536  if (optimal_gain <= dot) {
537  dot = dot / (dot + 0.6 * optimal_gain); // 0.625-1.000
538  } else
539  dot = 0.625;
540 
541  /* actual smoothing */
542  for (n = 0; n < size; n++)
543  out[n] = best_hist_ptr[n] + dot * (in[n] - best_hist_ptr[n]);
544 
545  return 0;
546 }
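/* Illustrative note, added for clarity (not part of the original source):
 * the interpolation weight computed above always lies in [0.625, 1.0].
 * Two boundary cases, written out:
 *
 *     // history barely correlates: optimal_gain -> 0 (but still > 0)
 *     //   weight -> dot / (dot + 0) = 1.0        -> out[n] ~= in[n] (no smoothing)
 *     // history matches as well as possible: optimal_gain == dot
 *     //   weight  = dot / (dot + 0.6 * dot) = 0.625
 *     //   out[n]  = 0.625 * in[n] + 0.375 * best_hist_ptr[n]
 *
 * i.e. the more the excitation resembles a pitch-shifted copy of its own
 * history, the more of that history is mixed in.
 */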
547 
558 static float tilt_factor(const float *lpcs, int n_lpcs)
559 {
560  float rh0, rh1;
561 
562  rh0 = 1.0 + ff_dot_productf(lpcs, lpcs, n_lpcs);
563  rh1 = lpcs[0] + ff_dot_productf(lpcs, &lpcs[1], n_lpcs - 1);
564 
565  return rh1 / rh0;
566 }
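/* Illustrative note, added for clarity (not part of the original source):
 * rh0 and rh1 above are the lag-0 and lag-1 autocorrelation of the coefficient
 * vector { 1.0, lpcs[0], ..., lpcs[n_lpcs - 1] }, so the return value is a
 * normalized first reflection coefficient. E.g. for a single (hypothetical)
 * coefficient lpcs[] = { 0.5 }:
 *
 *     rh0 = 1.0 + 0.5 * 0.5 = 1.25;   rh1 = 0.5;   tilt = 0.5 / 1.25 = 0.4;
 */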
567 
571 static void calc_input_response(WMAVoiceContext *s, float *lpcs,
572  int fcb_type, float *coeffs, int remainder)
573 {
574  float last_coeff, min = 15.0, max = -15.0;
575  float irange, angle_mul, gain_mul, range, sq;
576  int n, idx;
577 
578  /* Create frequency power spectrum of speech input (i.e. RDFT of LPCs) */
579  s->rdft.rdft_calc(&s->rdft, lpcs);
580 #define log_range(var, assign) do { \
581  float tmp = log10f(assign); var = tmp; \
582  max = FFMAX(max, tmp); min = FFMIN(min, tmp); \
583  } while (0)
584  log_range(last_coeff, lpcs[1] * lpcs[1]);
585  for (n = 1; n < 64; n++)
586  log_range(lpcs[n], lpcs[n * 2] * lpcs[n * 2] +
587  lpcs[n * 2 + 1] * lpcs[n * 2 + 1]);
588  log_range(lpcs[0], lpcs[0] * lpcs[0]);
589 #undef log_range
590  range = max - min;
591  lpcs[64] = last_coeff;
592 
593  /* Now, use this spectrum to pick out those frequencies with higher
594  * (relative) power/energy (which we then take to be "not noise"),
595  * and set up a table (still in lpcs[]) of (relative) gains per frequency.
596  * These frequencies will be maintained, while others ("noise") will be
597  * decreased in the filter output. */
598  irange = 64.0 / range; // so irange*(max-value) is in the range [0, 63]
599  gain_mul = range * (fcb_type == FCB_TYPE_HARDCODED ? (5.0 / 13.0) :
600  (5.0 / 14.7));
601  angle_mul = gain_mul * (8.0 * M_LN10 / M_PI);
602  for (n = 0; n <= 64; n++) {
603  float pwr;
604 
605  idx = FFMAX(0, lrint((max - lpcs[n]) * irange) - 1);
606  pwr = wmavoice_denoise_power_table[s->denoise_strength][idx];
607  lpcs[n] = angle_mul * pwr;
608 
609  /* 70.57 =~ 1/log10(1.0331663) */
610  idx = (pwr * gain_mul - 0.0295) * 70.570526123;
611  if (idx > 127) { // fallback if index falls outside table range
612  coeffs[n] = wmavoice_energy_table[127] *
613  powf(1.0331663, idx - 127);
614  } else
615  coeffs[n] = wmavoice_energy_table[FFMAX(0, idx)];
616  }
617 
618  /* calculate the Hilbert transform of the gains, which we do (since this
619  * is a sine input) by doing a phase shift (in theory, H(sin())=cos()).
620  * Hilbert_Transform(RDFT(x)) = Laplace_Transform(x), which calculates the
621  * "moment" of the LPCs in this filter. */
622  s->dct.dct_calc(&s->dct, lpcs);
623  s->dst.dct_calc(&s->dst, lpcs);
624 
625  /* Split out the coefficient indexes into phase/magnitude pairs */
626  idx = 255 + av_clip(lpcs[64], -255, 255);
627  coeffs[0] = coeffs[0] * s->cos[idx];
628  idx = 255 + av_clip(lpcs[64] - 2 * lpcs[63], -255, 255);
629  last_coeff = coeffs[64] * s->cos[idx];
630  for (n = 63;; n--) {
631  idx = 255 + av_clip(-lpcs[64] - 2 * lpcs[n - 1], -255, 255);
632  coeffs[n * 2 + 1] = coeffs[n] * s->sin[idx];
633  coeffs[n * 2] = coeffs[n] * s->cos[idx];
634 
635  if (!--n) break;
636 
637  idx = 255 + av_clip( lpcs[64] - 2 * lpcs[n - 1], -255, 255);
638  coeffs[n * 2 + 1] = coeffs[n] * s->sin[idx];
639  coeffs[n * 2] = coeffs[n] * s->cos[idx];
640  }
641  coeffs[1] = last_coeff;
642 
643  /* move into real domain */
644  s->irdft.rdft_calc(&s->irdft, coeffs);
645 
646  /* tilt correction and normalize scale */
647  memset(&coeffs[remainder], 0, sizeof(coeffs[0]) * (128 - remainder));
648  if (s->denoise_tilt_corr) {
649  float tilt_mem = 0;
650 
651  coeffs[remainder - 1] = 0;
652  ff_tilt_compensation(&tilt_mem,
653  -1.8 * tilt_factor(coeffs, remainder - 1),
654  coeffs, remainder);
655  }
656  sq = (1.0 / 64.0) * sqrtf(1 / ff_dot_productf(coeffs, coeffs, remainder));
657  for (n = 0; n < remainder; n++)
658  coeffs[n] *= sq;
659 }
660 
687 static void wiener_denoise(WMAVoiceContext *s, int fcb_type,
688  float *synth_pf, int size,
689  const float *lpcs)
690 {
691  int remainder, lim, n;
692 
693  if (fcb_type != FCB_TYPE_SILENCE) {
694  float *tilted_lpcs = s->tilted_lpcs_pf,
695  *coeffs = s->denoise_coeffs_pf, tilt_mem = 0;
696 
697  tilted_lpcs[0] = 1.0;
698  memcpy(&tilted_lpcs[1], lpcs, sizeof(lpcs[0]) * s->lsps);
699  memset(&tilted_lpcs[s->lsps + 1], 0,
700  sizeof(tilted_lpcs[0]) * (128 - s->lsps - 1));
701  ff_tilt_compensation(&tilt_mem, 0.7 * tilt_factor(lpcs, s->lsps),
702  tilted_lpcs, s->lsps + 2);
703 
704  /* The IRDFT output (127 samples for 7-bit filter) beyond the frame
705  * size is applied to the next frame. All input beyond this is zero,
706  * and thus all output beyond this will go towards zero, hence we can
707  * limit to min(size-1, 127-size) as a performance consideration. */
708  remainder = FFMIN(127 - size, size - 1);
709  calc_input_response(s, tilted_lpcs, fcb_type, coeffs, remainder);
710 
711  /* apply coefficients (in frequency spectrum domain), i.e. complex
712  * number multiplication */
713  memset(&synth_pf[size], 0, sizeof(synth_pf[0]) * (128 - size));
714  s->rdft.rdft_calc(&s->rdft, synth_pf);
715  s->rdft.rdft_calc(&s->rdft, coeffs);
716  synth_pf[0] *= coeffs[0];
717  synth_pf[1] *= coeffs[1];
718  for (n = 1; n < 64; n++) {
719  float v1 = synth_pf[n * 2], v2 = synth_pf[n * 2 + 1];
720  synth_pf[n * 2] = v1 * coeffs[n * 2] - v2 * coeffs[n * 2 + 1];
721  synth_pf[n * 2 + 1] = v2 * coeffs[n * 2] + v1 * coeffs[n * 2 + 1];
722  }
723  s->irdft.rdft_calc(&s->irdft, synth_pf);
724  }
725 
726  /* merge filter output with the history of previous runs */
727  if (s->denoise_filter_cache_size) {
728  lim = FFMIN(s->denoise_filter_cache_size, size);
729  for (n = 0; n < lim; n++)
730  synth_pf[n] += s->denoise_filter_cache[n];
731  s->denoise_filter_cache_size -= lim;
732  memmove(s->denoise_filter_cache, &s->denoise_filter_cache[size],
733  sizeof(s->denoise_filter_cache[0]) * s->denoise_filter_cache_size);
734  }
735 
736  /* move remainder of filter output into a cache for future runs */
737  if (fcb_type != FCB_TYPE_SILENCE) {
738  lim = FFMIN(remainder, s->denoise_filter_cache_size);
739  for (n = 0; n < lim; n++)
740  s->denoise_filter_cache[n] += synth_pf[size + n];
741  if (lim < remainder) {
742  memcpy(&s->denoise_filter_cache[lim], &synth_pf[size + lim],
743  sizeof(s->denoise_filter_cache[0]) * (remainder - lim));
744  s->denoise_filter_cache_size = remainder;
745  }
746  }
747 }
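/* Illustrative note, added for clarity (not part of the original source):
 * the spectra handled above use the RDFT's packed ("half-complex") layout:
 * element 0 is the DC bin, element 1 the Nyquist bin (both purely real), and
 * elements 2n / 2n+1 hold the real / imaginary part of bin n for n = 1..63.
 * That is why DC and Nyquist are scaled separately, while the loop performs a
 * full complex multiply per bin:
 *
 *     // (v1 + i*v2) * (coeff_re + i*coeff_im)
 *     re = v1 * coeff_re - v2 * coeff_im;
 *     im = v2 * coeff_re + v1 * coeff_im;
 */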
748 
769 static void postfilter(WMAVoiceContext *s, const float *synth,
770  float *samples, int size,
771  const float *lpcs, float *zero_exc_pf,
772  int fcb_type, int pitch)
773 {
774  float synth_filter_in_buf[MAX_FRAMESIZE / 2],
775  *synth_pf = &s->synth_filter_out_buf[MAX_LSPS_ALIGN16],
776  *synth_filter_in = zero_exc_pf;
777 
778  assert(size <= MAX_FRAMESIZE / 2);
779 
780  /* generate excitation from input signal */
781  ff_celp_lp_zero_synthesis_filterf(zero_exc_pf, lpcs, synth, size, s->lsps);
782 
783  if (fcb_type >= FCB_TYPE_AW_PULSES &&
784  !kalman_smoothen(s, pitch, zero_exc_pf, synth_filter_in_buf, size))
785  synth_filter_in = synth_filter_in_buf;
786 
787  /* re-synthesize speech after smoothening, and keep history */
788  ff_celp_lp_synthesis_filterf(synth_pf, lpcs,
789  synth_filter_in, size, s->lsps);
790  memcpy(&synth_pf[-s->lsps], &synth_pf[size - s->lsps],
791  sizeof(synth_pf[0]) * s->lsps);
792 
793  wiener_denoise(s, fcb_type, synth_pf, size, lpcs);
794 
795  adaptive_gain_control(samples, synth_pf, synth, size, 0.99,
796  &s->postfilter_agc);
797 
798  if (s->dc_level > 8) {
799  /* remove ultra-low frequency DC noise / highpass filter;
800  * coefficients are identical to those used in SIPR decoding,
801  * and very closely resemble those used in AMR-NB decoding. */
802  ff_acelp_apply_order_2_transfer_function(samples, samples,
803  (const float[2]) { -1.99997, 1.0 },
804  (const float[2]) { -1.9330735188, 0.93589198496 },
805  0.93980580475, s->dcf_mem, size);
806  }
807 }
823 static void dequant_lsps(double *lsps, int num,
824  const uint16_t *values,
825  const uint16_t *sizes,
826  int n_stages, const uint8_t *table,
827  const double *mul_q,
828  const double *base_q)
829 {
830  int n, m;
831 
832  memset(lsps, 0, num * sizeof(*lsps));
833  for (n = 0; n < n_stages; n++) {
834  const uint8_t *t_off = &table[values[n] * num];
835  double base = base_q[n], mul = mul_q[n];
836 
837  for (m = 0; m < num; m++)
838  lsps[m] += base + mul * t_off[m];
839 
840  table += sizes[n] * num;
841  }
842 }
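/* Illustrative note, added for clarity (not part of the original source):
 * dequant_lsps() is plain multi-stage dequantization; every stage adds one
 * scaled codebook row. A two-stage toy case with hypothetical numbers
 * (num = 2, values = { 3, 1 }):
 *
 *     lsps[m] = (base_q[0] + mul_q[0] * table0[3 * 2 + m])
 *             + (base_q[1] + mul_q[1] * table1[1 * 2 + m]);
 *
 * where table1 starts sizes[0] * num entries after table0, exactly the step
 * the `table += sizes[n] * num` line performs.
 */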
843 
855 static void dequant_lsp10i(GetBitContext *gb, double *lsps)
856 {
857  static const uint16_t vec_sizes[4] = { 256, 64, 32, 32 };
858  static const double mul_lsf[4] = {
859  5.2187144800e-3, 1.4626986422e-3,
860  9.6179549166e-4, 1.1325736225e-3
861  };
862  static const double base_lsf[4] = {
863  M_PI * -2.15522e-1, M_PI * -6.1646e-2,
864  M_PI * -3.3486e-2, M_PI * -5.7408e-2
865  };
866  uint16_t v[4];
867 
868  v[0] = get_bits(gb, 8);
869  v[1] = get_bits(gb, 6);
870  v[2] = get_bits(gb, 5);
871  v[3] = get_bits(gb, 5);
872 
873  dequant_lsps(lsps, 10, v, vec_sizes, 4, wmavoice_dq_lsp10i,
874  mul_lsf, base_lsf);
875 }
876 
881 static void dequant_lsp10r(GetBitContext *gb,
882  double *i_lsps, const double *old,
883  double *a1, double *a2, int q_mode)
884 {
885  static const uint16_t vec_sizes[3] = { 128, 64, 64 };
886  static const double mul_lsf[3] = {
887  2.5807601174e-3, 1.2354460219e-3, 1.1763821673e-3
888  };
889  static const double base_lsf[3] = {
890  M_PI * -1.07448e-1, M_PI * -5.2706e-2, M_PI * -5.1634e-2
891  };
892  const float (*ipol_tab)[2][10] = q_mode ?
893  wmavoice_lsp10_intercoeff_b : wmavoice_lsp10_intercoeff_a;
894  uint16_t interpol, v[3];
895  int n;
896 
897  dequant_lsp10i(gb, i_lsps);
898 
899  interpol = get_bits(gb, 5);
900  v[0] = get_bits(gb, 7);
901  v[1] = get_bits(gb, 6);
902  v[2] = get_bits(gb, 6);
903 
904  for (n = 0; n < 10; n++) {
905  double delta = old[n] - i_lsps[n];
906  a1[n] = ipol_tab[interpol][0][n] * delta + i_lsps[n];
907  a1[10 + n] = ipol_tab[interpol][1][n] * delta + i_lsps[n];
908  }
909 
910  dequant_lsps(a2, 20, v, vec_sizes, 3, wmavoice_dq_lsp10r,
911  mul_lsf, base_lsf);
912 }
913 
917 static void dequant_lsp16i(GetBitContext *gb, double *lsps)
918 {
919  static const uint16_t vec_sizes[5] = { 256, 64, 128, 64, 128 };
920  static const double mul_lsf[5] = {
921  3.3439586280e-3, 6.9908173703e-4,
922  3.3216608306e-3, 1.0334960326e-3,
923  3.1899104283e-3
924  };
925  static const double base_lsf[5] = {
926  M_PI * -1.27576e-1, M_PI * -2.4292e-2,
927  M_PI * -1.28094e-1, M_PI * -3.2128e-2,
928  M_PI * -1.29816e-1
929  };
930  uint16_t v[5];
931 
932  v[0] = get_bits(gb, 8);
933  v[1] = get_bits(gb, 6);
934  v[2] = get_bits(gb, 7);
935  v[3] = get_bits(gb, 6);
936  v[4] = get_bits(gb, 7);
937 
938  dequant_lsps( lsps, 5, v, vec_sizes, 2,
939  wmavoice_dq_lsp16i1, mul_lsf, base_lsf);
940  dequant_lsps(&lsps[5], 5, &v[2], &vec_sizes[2], 2,
941  wmavoice_dq_lsp16i2, &mul_lsf[2], &base_lsf[2]);
942  dequant_lsps(&lsps[10], 6, &v[4], &vec_sizes[4], 1,
943  wmavoice_dq_lsp16i3, &mul_lsf[4], &base_lsf[4]);
944 }
945 
950 static void dequant_lsp16r(GetBitContext *gb,
951  double *i_lsps, const double *old,
952  double *a1, double *a2, int q_mode)
953 {
954  static const uint16_t vec_sizes[3] = { 128, 128, 128 };
955  static const double mul_lsf[3] = {
956  1.2232979501e-3, 1.4062241527e-3, 1.6114744851e-3
957  };
958  static const double base_lsf[3] = {
959  M_PI * -5.5830e-2, M_PI * -5.2908e-2, M_PI * -5.4776e-2
960  };
961  const float (*ipol_tab)[2][16] = q_mode ?
962  wmavoice_lsp16_intercoeff_b : wmavoice_lsp16_intercoeff_a;
963  uint16_t interpol, v[3];
964  int n;
965 
966  dequant_lsp16i(gb, i_lsps);
967 
968  interpol = get_bits(gb, 5);
969  v[0] = get_bits(gb, 7);
970  v[1] = get_bits(gb, 7);
971  v[2] = get_bits(gb, 7);
972 
973  for (n = 0; n < 16; n++) {
974  double delta = old[n] - i_lsps[n];
975  a1[n] = ipol_tab[interpol][0][n] * delta + i_lsps[n];
976  a1[16 + n] = ipol_tab[interpol][1][n] * delta + i_lsps[n];
977  }
978 
979  dequant_lsps( a2, 10, v, vec_sizes, 1,
980  wmavoice_dq_lsp16r1, mul_lsf, base_lsf);
981  dequant_lsps(&a2[10], 10, &v[1], &vec_sizes[1], 1,
982  wmavoice_dq_lsp16r2, &mul_lsf[1], &base_lsf[1]);
983  dequant_lsps(&a2[20], 12, &v[2], &vec_sizes[2], 1,
984  wmavoice_dq_lsp16r3, &mul_lsf[2], &base_lsf[2]);
985 }
986 
1000 static void aw_parse_coords(WMAVoiceContext *s, GetBitContext *gb,
1001  const int *pitch)
1002 {
1003  static const int16_t start_offset[94] = {
1004  -11, -9, -7, -5, -3, -1, 1, 3, 5, 7, 9, 11,
1005  13, 15, 18, 17, 19, 20, 21, 22, 23, 24, 25, 26,
1006  27, 28, 29, 30, 31, 32, 33, 35, 37, 39, 41, 43,
1007  45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67,
1008  69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91,
1009  93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115,
1010  117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139,
1011  141, 143, 145, 147, 149, 151, 153, 155, 157, 159
1012  };
1013  int bits, offset;
1014 
1015  /* position of pulse */
1016  s->aw_idx_is_ext = 0;
1017  if ((bits = get_bits(gb, 6)) >= 54) {
1018  s->aw_idx_is_ext = 1;
1019  bits += (bits - 54) * 3 + get_bits(gb, 2);
1020  }
1021 
1022  /* for a repeated pulse at pulse_off with a pitch_lag of pitch[], count
1023  * the distribution of the pulses in each block contained in this frame. */
1024  s->aw_pulse_range = FFMIN(pitch[0], pitch[1]) > 32 ? 24 : 16;
1025  for (offset = start_offset[bits]; offset < 0; offset += pitch[0]) ;
1026  s->aw_n_pulses[0] = (pitch[0] - 1 + MAX_FRAMESIZE / 2 - offset) / pitch[0];
1027  s->aw_first_pulse_off[0] = offset - s->aw_pulse_range / 2;
1028  offset += s->aw_n_pulses[0] * pitch[0];
1029  s->aw_n_pulses[1] = (pitch[1] - 1 + MAX_FRAMESIZE - offset) / pitch[1];
1030  s->aw_first_pulse_off[1] = offset - (MAX_FRAMESIZE + s->aw_pulse_range) / 2;
1031 
1032  /* if continuing from a position before the block, reset position to
1033  * start of block (when corrected for the range over which it can be
1034  * spread in aw_pulse_set1()). */
1035  if (start_offset[bits] < MAX_FRAMESIZE / 2) {
1036  while (s->aw_first_pulse_off[1] - pitch[1] + s->aw_pulse_range > 0)
1037  s->aw_first_pulse_off[1] -= pitch[1];
1038  if (start_offset[bits] < 0)
1039  while (s->aw_first_pulse_off[0] - pitch[0] + s->aw_pulse_range > 0)
1040  s->aw_first_pulse_off[0] -= pitch[0];
1041  }
1042 }
1043 
1052 static int aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
1053  int block_idx, AMRFixed *fcb)
1054 {
1055  uint16_t use_mask_mem[9]; // only 5 are used, rest is padding
1056  uint16_t *use_mask = use_mask_mem + 2;
1057  /* in this function, idx is the index in the 80-bit (+ padding) use_mask
1058  * bit-array. Since use_mask consists of 16-bit values, the lower 4 bits
1059  * of idx are the position of the bit within a particular item in the
1060  * array (0 being the most significant bit, and 15 being the least
1061  * significant bit), and the remainder (>> 4) is the index in the
1062  * use_mask[]-array. This is faster and uses less memory than using an
1063  * 80-byte/80-int array. */
1064  int pulse_off = s->aw_first_pulse_off[block_idx],
1065  pulse_start, n, idx, range, aidx, start_off = 0;
1066 
1067  /* set offset of first pulse to within this block */
1068  if (s->aw_n_pulses[block_idx] > 0)
1069  while (pulse_off + s->aw_pulse_range < 1)
1070  pulse_off += fcb->pitch_lag;
1071 
1072  /* find range per pulse */
1073  if (s->aw_n_pulses[0] > 0) {
1074  if (block_idx == 0) {
1075  range = 32;
1076  } else /* block_idx = 1 */ {
1077  range = 8;
1078  if (s->aw_n_pulses[block_idx] > 0)
1079  pulse_off = s->aw_next_pulse_off_cache;
1080  }
1081  } else
1082  range = 16;
1083  pulse_start = s->aw_n_pulses[block_idx] > 0 ? pulse_off - range / 2 : 0;
1084 
1085  /* aw_pulse_set1() already applies pulses around pulse_off (to be exact,
1086  * in the range [pulse_off, pulse_off + s->aw_pulse_range]), and thus
1087  * we exclude that range from being pulsed again in this function. */
1088  memset(&use_mask[-2], 0, 2 * sizeof(use_mask[0]));
1089  memset( use_mask, -1, 5 * sizeof(use_mask[0]));
1090  memset(&use_mask[5], 0, 2 * sizeof(use_mask[0]));
1091  if (s->aw_n_pulses[block_idx] > 0)
1092  for (idx = pulse_off; idx < MAX_FRAMESIZE / 2; idx += fcb->pitch_lag) {
1093  int excl_range = s->aw_pulse_range; // always 16 or 24
1094  uint16_t *use_mask_ptr = &use_mask[idx >> 4];
1095  int first_sh = 16 - (idx & 15);
1096  *use_mask_ptr++ &= 0xFFFFu << first_sh;
1097  excl_range -= first_sh;
1098  if (excl_range >= 16) {
1099  *use_mask_ptr++ = 0;
1100  *use_mask_ptr &= 0xFFFF >> (excl_range - 16);
1101  } else
1102  *use_mask_ptr &= 0xFFFF >> excl_range;
1103  }
1104 
1105  /* find the 'aidx'th offset that is not excluded */
1106  aidx = get_bits(gb, s->aw_n_pulses[0] > 0 ? 5 - 2 * block_idx : 4);
1107  for (n = 0; n <= aidx; pulse_start++) {
1108  for (idx = pulse_start; idx < 0; idx += fcb->pitch_lag) ;
1109  if (idx >= MAX_FRAMESIZE / 2) { // find from zero
1110  if (use_mask[0]) idx = 0x0F;
1111  else if (use_mask[1]) idx = 0x1F;
1112  else if (use_mask[2]) idx = 0x2F;
1113  else if (use_mask[3]) idx = 0x3F;
1114  else if (use_mask[4]) idx = 0x4F;
1115  else return -1;
1116  idx -= av_log2_16bit(use_mask[idx >> 4]);
1117  }
1118  if (use_mask[idx >> 4] & (0x8000 >> (idx & 15))) {
1119  use_mask[idx >> 4] &= ~(0x8000 >> (idx & 15));
1120  n++;
1121  start_off = idx;
1122  }
1123  }
1124 
1125  fcb->x[fcb->n] = start_off;
1126  fcb->y[fcb->n] = get_bits1(gb) ? -1.0 : 1.0;
1127  fcb->n++;
1128 
1129  /* set offset for next block, relative to start of that block */
1130  n = (MAX_FRAMESIZE / 2 - start_off) % fcb->pitch_lag;
1131  s->aw_next_pulse_off_cache = n ? fcb->pitch_lag - n : 0;
1132  return 0;
1133 }
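/* Illustrative note, added for clarity (not part of the original source):
 * a worked instance of the use_mask[] addressing used above. For sample
 * position idx = 37 in the 80-sample half-frame:
 *
 *     use_mask[37 >> 4]        // word 2 of the five in-use 16-bit words
 *     0x8000 >> (37 & 15)      // == 0x0400, i.e. bit 5 counted from the MSB
 *
 * so testing and clearing position 37 is
 *     use_mask[2] & 0x0400;    use_mask[2] &= ~0x0400;
 * and the "find from zero" branch converts a word index k with any bit still
 * set into idx = (k * 16 + 15) - av_log2_16bit(use_mask[k]), the first
 * non-excluded position inside that word.
 */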
1134 
1142 static void aw_pulse_set1(WMAVoiceContext *s, GetBitContext *gb,
1143  int block_idx, AMRFixed *fcb)
1144 {
1145  int val = get_bits(gb, 12 - 2 * (s->aw_idx_is_ext && !block_idx));
1146  float v;
1147 
1148  if (s->aw_n_pulses[block_idx] > 0) {
1149  int n, v_mask, i_mask, sh, n_pulses;
1150 
1151  if (s->aw_pulse_range == 24) { // 3 pulses, 1:sign + 3:index each
1152  n_pulses = 3;
1153  v_mask = 8;
1154  i_mask = 7;
1155  sh = 4;
1156  } else { // 4 pulses, 1:sign + 2:index each
1157  n_pulses = 4;
1158  v_mask = 4;
1159  i_mask = 3;
1160  sh = 3;
1161  }
1162 
1163  for (n = n_pulses - 1; n >= 0; n--, val >>= sh) {
1164  fcb->y[fcb->n] = (val & v_mask) ? -1.0 : 1.0;
1165  fcb->x[fcb->n] = (val & i_mask) * n_pulses + n +
1166  s->aw_first_pulse_off[block_idx];
1167  while (fcb->x[fcb->n] < 0)
1168  fcb->x[fcb->n] += fcb->pitch_lag;
1169  if (fcb->x[fcb->n] < MAX_FRAMESIZE / 2)
1170  fcb->n++;
1171  }
1172  } else {
1173  int num2 = (val & 0x1FF) >> 1, delta, idx;
1174 
1175  if (num2 < 1 * 79) { delta = 1; idx = num2 + 1; }
1176  else if (num2 < 2 * 78) { delta = 3; idx = num2 + 1 - 1 * 77; }
1177  else if (num2 < 3 * 77) { delta = 5; idx = num2 + 1 - 2 * 76; }
1178  else { delta = 7; idx = num2 + 1 - 3 * 75; }
1179  v = (val & 0x200) ? -1.0 : 1.0;
1180 
1181  fcb->no_repeat_mask |= 3 << fcb->n;
1182  fcb->x[fcb->n] = idx - delta;
1183  fcb->y[fcb->n] = v;
1184  fcb->x[fcb->n + 1] = idx;
1185  fcb->y[fcb->n + 1] = (val & 1) ? -v : v;
1186  fcb->n += 2;
1187  }
1188 }
1189 
1203 static int pRNG(int frame_cntr, int block_num, int block_size)
1204 {
1205  /* array to simplify the calculation of z:
1206  * y = (x % 9) * 5 + 6;
1207  * z = (49995 * x) / y;
1208  * Since y only has 9 values, we can remove the division by using a
1209  * LUT and using FASTDIV-style divisions. For each of the 9 values
1210  * of y, we can rewrite z as:
1211  * z = x * (49995 / y) + x * ((49995 % y) / y)
1212  * In this table, each col represents one possible value of y, the
1213  * first number is 49995 / y, and the second is the FASTDIV variant
1214  * of 49995 % y / y. */
1215  static const unsigned int div_tbl[9][2] = {
1216  { 8332, 3 * 715827883U }, // y = 6
1217  { 4545, 0 * 390451573U }, // y = 11
1218  { 3124, 11 * 268435456U }, // y = 16
1219  { 2380, 15 * 204522253U }, // y = 21
1220  { 1922, 23 * 165191050U }, // y = 26
1221  { 1612, 23 * 138547333U }, // y = 31
1222  { 1388, 27 * 119304648U }, // y = 36
1223  { 1219, 16 * 104755300U }, // y = 41
1224  { 1086, 39 * 93368855U } // y = 46
1225  };
1226  unsigned int z, y, x = MUL16(block_num, 1877) + frame_cntr;
1227  if (x >= 0xFFFF) x -= 0xFFFF; // max value of x is 8*1877+0xFFFE=0x13AA6,
1228  // so this is effectively a modulo (%)
1229  y = x - 9 * MULH(477218589, x); // x % 9
1230  z = (uint16_t) (x * div_tbl[y][0] + UMULH(x, div_tbl[y][1]));
1231  // z = x * 49995 / (y * 5 + 6)
1232  return z % (1000 - block_size);
1233 }
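/* Illustrative note, added for clarity (not part of the original source):
 * a worked instance of the LUT trick above with hypothetical inputs
 * frame_cntr = 10, block_num = 2, block_size = 80:
 *
 *     x = 2 * 1877 + 10 = 3764;          // < 0xFFFF, so no wrap-around
 *     y = 3764 % 9 = 2;                  // selects div_tbl[2], i.e. y = 16
 *     z = 3764 * 3124 + UMULH(3764, 11 * 268435456U)
 *       = 11758736 + 2587 = 11761323;    // == (3764 * 49995) / 16, no division
 *     (uint16_t) 11761323 = 30379;       // keep the low 16 bits
 *     return 30379 % (1000 - 80);        // = 19
 */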
1234 
1239 static void synth_block_hardcoded(WMAVoiceContext *s, GetBitContext *gb,
1240  int block_idx, int size,
1241  const struct frame_type_desc *frame_desc,
1242  float *excitation)
1243 {
1244  float gain;
1245  int n, r_idx;
1246 
1247  assert(size <= MAX_FRAMESIZE);
1248 
1249  /* Set the offset from which we start reading wmavoice_std_codebook */
1250  if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
1251  r_idx = pRNG(s->frame_cntr, block_idx, size);
1252  gain = s->silence_gain;
1253  } else /* FCB_TYPE_HARDCODED */ {
1254  r_idx = get_bits(gb, 8);
1255  gain = wmavoice_gain_universal[get_bits(gb, 6)];
1256  }
1257 
1258  /* Clear gain prediction parameters */
1259  memset(s->gain_pred_err, 0, sizeof(s->gain_pred_err));
1260 
1261  /* Apply gain to hardcoded codebook and use that as excitation signal */
1262  for (n = 0; n < size; n++)
1263  excitation[n] = wmavoice_std_codebook[r_idx + n] * gain;
1264 }
1265 
1270 static void synth_block_fcb_acb(WMAVoiceContext *s, GetBitContext *gb,
1271  int block_idx, int size,
1272  int block_pitch_sh2,
1273  const struct frame_type_desc *frame_desc,
1274  float *excitation)
1275 {
1276  static const float gain_coeff[6] = {
1277  0.8169, -0.06545, 0.1726, 0.0185, -0.0359, 0.0458
1278  };
1279  float pulses[MAX_FRAMESIZE / 2], pred_err, acb_gain, fcb_gain;
1280  int n, idx, gain_weight;
1281  AMRFixed fcb;
1282 
1283  assert(size <= MAX_FRAMESIZE / 2);
1284  memset(pulses, 0, sizeof(*pulses) * size);
1285 
1286  fcb.pitch_lag = block_pitch_sh2 >> 2;
1287  fcb.pitch_fac = 1.0;
1288  fcb.no_repeat_mask = 0;
1289  fcb.n = 0;
1290 
1291  /* For the other frame types, this is where we apply the innovation
1292  * (fixed) codebook pulses of the speech signal. */
1293  if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
1294  aw_pulse_set1(s, gb, block_idx, &fcb);
1295  if (aw_pulse_set2(s, gb, block_idx, &fcb)) {
1296  /* Conceal the block with silence and return.
1297  * Skip the correct amount of bits to read the next
1298  * block from the correct offset. */
1299  int r_idx = pRNG(s->frame_cntr, block_idx, size);
1300 
1301  for (n = 0; n < size; n++)
1302  excitation[n] =
1303  wmavoice_std_codebook[r_idx + n] * s->silence_gain;
1304  skip_bits(gb, 7 + 1);
1305  return;
1306  }
1307  } else /* FCB_TYPE_EXC_PULSES */ {
1308  int offset_nbits = 5 - frame_desc->log_n_blocks;
1309 
1310  fcb.no_repeat_mask = -1;
1311  /* similar to ff_decode_10_pulses_35bits(), but with single pulses
1312  * (instead of double) for a subset of pulses */
1313  for (n = 0; n < 5; n++) {
1314  float sign;
1315  int pos1, pos2;
1316 
1317  sign = get_bits1(gb) ? 1.0 : -1.0;
1318  pos1 = get_bits(gb, offset_nbits);
1319  fcb.x[fcb.n] = n + 5 * pos1;
1320  fcb.y[fcb.n++] = sign;
1321  if (n < frame_desc->dbl_pulses) {
1322  pos2 = get_bits(gb, offset_nbits);
1323  fcb.x[fcb.n] = n + 5 * pos2;
1324  fcb.y[fcb.n++] = (pos1 < pos2) ? -sign : sign;
1325  }
1326  }
1327  }
1328  ff_set_fixed_vector(pulses, &fcb, 1.0, size);
1329 
1330  /* Calculate gain for adaptive & fixed codebook signal.
1331  * see ff_amr_set_fixed_gain(). */
1332  idx = get_bits(gb, 7);
1333  fcb_gain = expf(ff_dot_productf(s->gain_pred_err, gain_coeff, 6) -
1334  5.2409161640 + wmavoice_gain_codebook_fcb[idx]);
1335  acb_gain = wmavoice_gain_codebook_acb[idx];
1336  pred_err = av_clipf(wmavoice_gain_codebook_fcb[idx],
1337  -2.9957322736 /* log(0.05) */,
1338  1.6094379124 /* log(5.0) */);
1339 
1340  gain_weight = 8 >> frame_desc->log_n_blocks;
1341  memmove(&s->gain_pred_err[gain_weight], s->gain_pred_err,
1342  sizeof(*s->gain_pred_err) * (6 - gain_weight));
1343  for (n = 0; n < gain_weight; n++)
1344  s->gain_pred_err[n] = pred_err;
1345 
1346  /* Calculation of adaptive codebook */
1347  if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
1348  int len;
1349  for (n = 0; n < size; n += len) {
1350  int next_idx_sh16;
1351  int abs_idx = block_idx * size + n;
1352  int pitch_sh16 = (s->last_pitch_val << 16) +
1353  s->pitch_diff_sh16 * abs_idx;
1354  int pitch = (pitch_sh16 + 0x6FFF) >> 16;
1355  int idx_sh16 = ((pitch << 16) - pitch_sh16) * 8 + 0x58000;
1356  idx = idx_sh16 >> 16;
1357  if (s->pitch_diff_sh16) {
1358  if (s->pitch_diff_sh16 > 0) {
1359  next_idx_sh16 = (idx_sh16) &~ 0xFFFF;
1360  } else
1361  next_idx_sh16 = (idx_sh16 + 0x10000) &~ 0xFFFF;
1362  len = av_clip((idx_sh16 - next_idx_sh16) / s->pitch_diff_sh16 / 8,
1363  1, size - n);
1364  } else
1365  len = size;
1366 
1367  ff_acelp_interpolatef(&excitation[n], &excitation[n - pitch],
1368  wmavoice_ipol1_coeffs, 17,
1369  idx, 9, len);
1370  }
1371  } else /* ACB_TYPE_HAMMING */ {
1372  int block_pitch = block_pitch_sh2 >> 2;
1373  idx = block_pitch_sh2 & 3;
1374  if (idx) {
1375  ff_acelp_interpolatef(excitation, &excitation[-block_pitch],
1376  wmavoice_ipol2_coeffs, 4,
1377  idx, 8, size);
1378  } else
1379  av_memcpy_backptr((uint8_t *) excitation, sizeof(float) * block_pitch,
1380  sizeof(float) * size);
1381  }
1382 
1383  /* Interpolate ACB/FCB and use as excitation signal */
1384  ff_weighted_vector_sumf(excitation, excitation, pulses,
1385  acb_gain, fcb_gain, size);
1386 }
1387 
1404 static void synth_block(WMAVoiceContext *s, GetBitContext *gb,
1405  int block_idx, int size,
1406  int block_pitch_sh2,
1407  const double *lsps, const double *prev_lsps,
1408  const struct frame_type_desc *frame_desc,
1409  float *excitation, float *synth)
1410 {
1411  double i_lsps[MAX_LSPS];
1412  float lpcs[MAX_LSPS];
1413  float fac;
1414  int n;
1415 
1416  if (frame_desc->acb_type == ACB_TYPE_NONE)
1417  synth_block_hardcoded(s, gb, block_idx, size, frame_desc, excitation);
1418  else
1419  synth_block_fcb_acb(s, gb, block_idx, size, block_pitch_sh2,
1420  frame_desc, excitation);
1421 
1422  /* convert interpolated LSPs to LPCs */
1423  fac = (block_idx + 0.5) / frame_desc->n_blocks;
1424  for (n = 0; n < s->lsps; n++) // LSF -> LSP
1425  i_lsps[n] = cos(prev_lsps[n] + fac * (lsps[n] - prev_lsps[n]));
1426  ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
1427 
1428  /* Speech synthesis */
1429  ff_celp_lp_synthesis_filterf(synth, lpcs, excitation, size, s->lsps);
1430 }
1431 
1447 static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
1448  float *samples,
1449  const double *lsps, const double *prev_lsps,
1450  float *excitation, float *synth)
1451 {
1452  WMAVoiceContext *s = ctx->priv_data;
1453  int n, n_blocks_x2, log_n_blocks_x2, cur_pitch_val;
1454  int pitch[MAX_BLOCKS], last_block_pitch;
1455 
1456  /* Parse frame type ("frame header"), see frame_descs */
1457  int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)], block_nsamples;
1458 
1459  if (bd_idx < 0) {
1460  av_log(ctx, AV_LOG_ERROR,
1461  "Invalid frame type VLC code, skipping\n");
1462  return -1;
1463  }
1464 
1465  block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;
1466 
1467  /* Pitch calculation for ACB_TYPE_ASYMMETRIC ("pitch-per-frame") */
1468  if (frame_descs[bd_idx].acb_type == ACB_TYPE_ASYMMETRIC) {
1469  /* Pitch is provided per frame, which is interpreted as the pitch of
1470  * the last sample of the last block of this frame. We can interpolate
1471  * the pitch of other blocks (and even pitch-per-sample) by gradually
1472  * incrementing/decrementing prev_frame_pitch to cur_pitch_val. */
1473  n_blocks_x2 = frame_descs[bd_idx].n_blocks << 1;
1474  log_n_blocks_x2 = frame_descs[bd_idx].log_n_blocks + 1;
1475  cur_pitch_val = s->min_pitch_val + get_bits(gb, s->pitch_nbits);
1476  cur_pitch_val = FFMIN(cur_pitch_val, s->max_pitch_val - 1);
1477  if (s->last_acb_type == ACB_TYPE_NONE ||
1478  20 * abs(cur_pitch_val - s->last_pitch_val) >
1479  (cur_pitch_val + s->last_pitch_val))
1480  s->last_pitch_val = cur_pitch_val;
1481 
1482  /* pitch per block */
1483  for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) {
1484  int fac = n * 2 + 1;
1485 
1486  pitch[n] = (MUL16(fac, cur_pitch_val) +
1487  MUL16((n_blocks_x2 - fac), s->last_pitch_val) +
1488  frame_descs[bd_idx].n_blocks) >> log_n_blocks_x2;
1489  }
1490 
1491  /* "pitch-diff-per-sample" for calculation of pitch per sample */
1492  s->pitch_diff_sh16 =
1493  ((cur_pitch_val - s->last_pitch_val) << 16) / MAX_FRAMESIZE;
1494  }
1495 
1496  /* Global gain (if silence) and pitch-adaptive window coordinates */
1497  switch (frame_descs[bd_idx].fcb_type) {
1498  case FCB_TYPE_SILENCE:
1499  s->silence_gain = wmavoice_gain_silence[get_bits(gb, 8)];
1500  break;
1501  case FCB_TYPE_AW_PULSES:
1502  aw_parse_coords(s, gb, pitch);
1503  break;
1504  }
1505 
1506  for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) {
1507  int bl_pitch_sh2;
1508 
1509  /* Pitch calculation for ACB_TYPE_HAMMING ("pitch-per-block") */
1510  switch (frame_descs[bd_idx].acb_type) {
1511  case ACB_TYPE_HAMMING: {
1512  /* Pitch is given per block. Per-block pitches are encoded as an
1513  * absolute value for the first block, and then as delta values
1514  * (relative to this value) for all subsequent blocks. The scale of
1515  * this pitch value is semi-logarithmic compared to its use in the
1516  * decoder, so we also convert it back to a linear scale. */
1517  int block_pitch,
1518  t1 = (s->block_conv_table[1] - s->block_conv_table[0]) << 2,
1519  t2 = (s->block_conv_table[2] - s->block_conv_table[1]) << 1,
1520  t3 = s->block_conv_table[3] - s->block_conv_table[2] + 1;
1521 
1522  if (n == 0) {
1523  block_pitch = get_bits(gb, s->block_pitch_nbits);
1524  } else
1525  block_pitch = last_block_pitch - s->block_delta_pitch_hrange +
1526  get_bits(gb, s->block_delta_pitch_nbits);
1527  /* Convert last_ so that any next delta is within _range */
1528  last_block_pitch = av_clip(block_pitch,
1529  s->block_delta_pitch_hrange,
1530  s->block_pitch_range -
1531  s->block_delta_pitch_hrange);
1532 
1533  /* Convert semi-log-style scale back to normal scale */
1534  if (block_pitch < t1) {
1535  bl_pitch_sh2 = (s->block_conv_table[0] << 2) + block_pitch;
1536  } else {
1537  block_pitch -= t1;
1538  if (block_pitch < t2) {
1539  bl_pitch_sh2 =
1540  (s->block_conv_table[1] << 2) + (block_pitch << 1);
1541  } else {
1542  block_pitch -= t2;
1543  if (block_pitch < t3) {
1544  bl_pitch_sh2 =
1545  (s->block_conv_table[2] + block_pitch) << 2;
1546  } else
1547  bl_pitch_sh2 = s->block_conv_table[3] << 2;
1548  }
1549  }
1550  pitch[n] = bl_pitch_sh2 >> 2;
1551  break;
1552  }
1553 
1554  case ACB_TYPE_ASYMMETRIC: {
1555  bl_pitch_sh2 = pitch[n] << 2;
1556  break;
1557  }
1558 
1559  default: // ACB_TYPE_NONE has no pitch
1560  bl_pitch_sh2 = 0;
1561  break;
1562  }
1563 
1564  synth_block(s, gb, n, block_nsamples, bl_pitch_sh2,
1565  lsps, prev_lsps, &frame_descs[bd_idx],
1566  &excitation[n * block_nsamples],
1567  &synth[n * block_nsamples]);
1568  }
1569 
1570  /* Averaging projection filter, if applicable. Else, just copy samples
1571  * from synthesis buffer */
1572  if (s->do_apf) {
1573  double i_lsps[MAX_LSPS];
1574  float lpcs[MAX_LSPS];
1575 
1576  for (n = 0; n < s->lsps; n++) // LSF -> LSP
1577  i_lsps[n] = cos(0.5 * (prev_lsps[n] + lsps[n]));
1578  ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
1579  postfilter(s, synth, samples, 80, lpcs,
1580  &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx],
1581  frame_descs[bd_idx].fcb_type, pitch[0]);
1582 
1583  for (n = 0; n < s->lsps; n++) // LSF -> LSP
1584  i_lsps[n] = cos(lsps[n]);
1585  ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);
1586  postfilter(s, &synth[80], &samples[80], 80, lpcs,
1587  &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx + 80],
1588  frame_descs[bd_idx].fcb_type, pitch[0]);
1589  } else
1590  memcpy(samples, synth, 160 * sizeof(synth[0]));
1591 
1592  /* Cache values for next frame */
1593  s->frame_cntr++;
1594  if (s->frame_cntr >= 0xFFFF) s->frame_cntr -= 0xFFFF; // i.e. modulo (%)
1595  s->last_acb_type = frame_descs[bd_idx].acb_type;
1596  switch (frame_descs[bd_idx].acb_type) {
1597  case ACB_TYPE_NONE:
1598  s->last_pitch_val = 0;
1599  break;
1600  case ACB_TYPE_ASYMMETRIC:
1601  s->last_pitch_val = cur_pitch_val;
1602  break;
1603  case ACB_TYPE_HAMMING:
1604  s->last_pitch_val = pitch[frame_descs[bd_idx].n_blocks - 1];
1605  break;
1606  }
1607 
1608  return 0;
1609 }
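/* Illustrative note, added for clarity (not part of the original source):
 * a worked example of the semi-logarithmic block-pitch scale decoded in the
 * ACB_TYPE_HAMMING branch above, using the 8000 Hz tables set up in
 * wmavoice_decode_init() (block_conv_table = { 20, 50, 88, 147 }):
 *
 *     t1 = (50 - 20) << 2 = 120;   // quarter-sample steps below pitch 50
 *     t2 = (88 - 50) << 1 =  76;   // half-sample steps between 50 and 88
 *     t3 = 147 - 88 + 1   =  60;   // whole-sample steps above 88
 *
 *     block_pitch = 130  ->  130 - t1 = 10, which is < t2
 *                        ->  bl_pitch_sh2 = (50 << 2) + (10 << 1) = 220
 *                        ->  pitch[n] = 220 >> 2 = 55 samples
 */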
1610 
1623 static void stabilize_lsps(double *lsps, int num)
1624 {
1625  int n, m, l;
1626 
1627  /* set minimum value for first, maximum value for last and minimum
1628  * spacing between LSF values.
1629  * Very similar to ff_set_min_dist_lsf(), but in double. */
1630  lsps[0] = FFMAX(lsps[0], 0.0015 * M_PI);
1631  for (n = 1; n < num; n++)
1632  lsps[n] = FFMAX(lsps[n], lsps[n - 1] + 0.0125 * M_PI);
1633  lsps[num - 1] = FFMIN(lsps[num - 1], 0.9985 * M_PI);
1634 
1635  /* reorder (looks like one-time / non-recursed bubblesort).
1636  * Very similar to ff_sort_nearly_sorted_floats(), but in double. */
1637  for (n = 1; n < num; n++) {
1638  if (lsps[n] < lsps[n - 1]) {
1639  for (m = 1; m < num; m++) {
1640  double tmp = lsps[m];
1641  for (l = m - 1; l >= 0; l--) {
1642  if (lsps[l] <= tmp) break;
1643  lsps[l + 1] = lsps[l];
1644  }
1645  lsps[l + 1] = tmp;
1646  }
1647  break;
1648  }
1649  }
1650 }
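/* Illustrative note, added for clarity (not part of the original source):
 * a small worked case for num = 3 with the (hypothetical) input
 * { 3.100, 3.120, 3.135 } rad, i.e. three lines crowded just below pi:
 *
 *     spacing pass:  lsps[1] = max(3.120, 3.100  + 0.0393) = 3.1393
 *                    lsps[2] = max(3.135, 3.1393 + 0.0393) = 3.1786
 *     ceiling:       lsps[2] = min(3.1786, 0.9985 * M_PI)  = 3.1369
 *
 * which leaves lsps[2] < lsps[1], so the insertion-sort pass reorders the
 * vector to { 3.1000, 3.1369, 3.1393 } before it is converted to LPCs.
 */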
1651 
1661 static int check_bits_for_superframe(GetBitContext *orig_gb,
1662  WMAVoiceContext *s)
1663 {
1664  GetBitContext s_gb, *gb = &s_gb;
1665  int n, need_bits, bd_idx;
1666  const struct frame_type_desc *frame_desc;
1667 
1668  /* initialize a copy */
1669  init_get_bits(gb, orig_gb->buffer, orig_gb->size_in_bits);
1670  skip_bits_long(gb, get_bits_count(orig_gb));
1671  assert(get_bits_left(gb) == get_bits_left(orig_gb));
1672 
1673  /* superframe header */
1674  if (get_bits_left(gb) < 14)
1675  return 1;
1676  if (!get_bits1(gb))
1677  return -1; // WMAPro-in-WMAVoice superframe
1678  if (get_bits1(gb)) skip_bits(gb, 12); // number of samples in superframe
1679  if (s->has_residual_lsps) { // residual LSPs (for all frames)
1680  if (get_bits_left(gb) < s->sframe_lsp_bitsize)
1681  return 1;
1682  skip_bits_long(gb, s->sframe_lsp_bitsize);
1683  }
1684 
1685  /* frames */
1686  for (n = 0; n < MAX_FRAMES; n++) {
1687  int aw_idx_is_ext = 0;
1688 
1689  if (!s->has_residual_lsps) { // independent LSPs (per-frame)
1690  if (get_bits_left(gb) < s->frame_lsp_bitsize) return 1;
1691  skip_bits_long(gb, s->frame_lsp_bitsize);
1692  }
1693  bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)];
1694  if (bd_idx < 0)
1695  return -1; // invalid frame type VLC code
1696  frame_desc = &frame_descs[bd_idx];
1697  if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
1698  if (get_bits_left(gb) < s->pitch_nbits)
1699  return 1;
1700  skip_bits_long(gb, s->pitch_nbits);
1701  }
1702  if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
1703  skip_bits(gb, 8);
1704  } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
1705  int tmp = get_bits(gb, 6);
1706  if (tmp >= 0x36) {
1707  skip_bits(gb, 2);
1708  aw_idx_is_ext = 1;
1709  }
1710  }
1711 
1712  /* blocks */
1713  if (frame_desc->acb_type == ACB_TYPE_HAMMING) {
1714  need_bits = s->block_pitch_nbits +
1715  (frame_desc->n_blocks - 1) * s->block_delta_pitch_nbits;
1716  } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
1717  need_bits = 2 * !aw_idx_is_ext;
1718  } else
1719  need_bits = 0;
1720  need_bits += frame_desc->frame_size;
1721  if (get_bits_left(gb) < need_bits)
1722  return 1;
1723  skip_bits_long(gb, need_bits);
1724  }
1725 
1726  return 0;
1727 }
1728 
1749 static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr)
1750 {
1751  WMAVoiceContext *s = ctx->priv_data;
1752  GetBitContext *gb = &s->gb, s_gb;
1753  int n, res, n_samples = 480;
1754  double lsps[MAX_FRAMES][MAX_LSPS];
1755  const double *mean_lsf = s->lsps == 16 ?
1756  wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
1757  float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
1758  float synth[MAX_LSPS + MAX_SFRAMESIZE];
1759  float *samples;
1760 
1761  memcpy(synth, s->synth_history,
1762  s->lsps * sizeof(*synth));
1763  memcpy(excitation, s->excitation_history,
1764  s->history_nsamples * sizeof(*excitation));
1765 
1766  if (s->sframe_cache_size > 0) {
1767  gb = &s_gb;
1768  init_get_bits(gb, s->sframe_cache, s->sframe_cache_size);
1769  s->sframe_cache_size = 0;
1770  }
1771 
1772  if ((res = check_bits_for_superframe(gb, s)) == 1) {
1773  *got_frame_ptr = 0;
1774  return 1;
1775  }
1776 
1777  /* First bit is speech/music bit, it differentiates between WMAVoice
1778  * speech samples (the actual codec) and WMAVoice music samples, which
1779  * are really WMAPro-in-WMAVoice-superframes. I've never seen those in
1780  * the wild yet. */
1781  if (!get_bits1(gb)) {
1782  av_log_missing_feature(ctx, "WMAPro-in-WMAVoice support", 1);
1783  return -1;
1784  }
1785 
1786  /* (optional) nr. of samples in superframe; always <= 480 and >= 0 */
1787  if (get_bits1(gb)) {
1788  if ((n_samples = get_bits(gb, 12)) > 480) {
1789  av_log(ctx, AV_LOG_ERROR,
1790  "Superframe encodes >480 samples (%d), not allowed\n",
1791  n_samples);
1792  return -1;
1793  }
1794  }
1795  /* Parse LSPs, if global for the superframe (can also be per-frame). */
1796  if (s->has_residual_lsps) {
1797  double prev_lsps[MAX_LSPS], a1[MAX_LSPS * 2], a2[MAX_LSPS * 2];
1798 
1799  for (n = 0; n < s->lsps; n++)
1800  prev_lsps[n] = s->prev_lsps[n] - mean_lsf[n];
1801 
1802  if (s->lsps == 10) {
1803  dequant_lsp10r(gb, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);
1804  } else /* s->lsps == 16 */
1805  dequant_lsp16r(gb, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);
1806 
1807  for (n = 0; n < s->lsps; n++) {
1808  lsps[0][n] = mean_lsf[n] + (a1[n] - a2[n * 2]);
1809  lsps[1][n] = mean_lsf[n] + (a1[s->lsps + n] - a2[n * 2 + 1]);
1810  lsps[2][n] += mean_lsf[n];
1811  }
1812  for (n = 0; n < 3; n++)
1813  stabilize_lsps(lsps[n], s->lsps);
1814  }
1815 
1816  /* get output buffer */
1817  s->frame.nb_samples = 480;
1818  if ((res = ff_get_buffer(ctx, &s->frame)) < 0) {
1819  av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
1820  return res;
1821  }
1822  s->frame.nb_samples = n_samples;
1823  samples = (float *)s->frame.data[0];
1824 
1825  /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
1826  for (n = 0; n < 3; n++) {
1827  if (!s->has_residual_lsps) {
1828  int m;
1829 
1830  if (s->lsps == 10) {
1831  dequant_lsp10i(gb, lsps[n]);
1832  } else /* s->lsps == 16 */
1833  dequant_lsp16i(gb, lsps[n]);
1834 
1835  for (m = 0; m < s->lsps; m++)
1836  lsps[n][m] += mean_lsf[m];
1837  stabilize_lsps(lsps[n], s->lsps);
1838  }
1839 
1840  if ((res = synth_frame(ctx, gb, n,
1841  &samples[n * MAX_FRAMESIZE],
1842  lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
1843  &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
1844  &synth[s->lsps + n * MAX_FRAMESIZE]))) {
1845  *got_frame_ptr = 0;
1846  return res;
1847  }
1848  }
1849 
1850  /* Statistics? FIXME - we don't check for length, a slight overrun
1851  * will be caught by internal buffer padding, and anything else
1852  * will be skipped, not read. */
1853  if (get_bits1(gb)) {
1854  res = get_bits(gb, 4);
1855  skip_bits(gb, 10 * (res + 1));
1856  }
1857 
1858  *got_frame_ptr = 1;
1859 
1860  /* Update history */
1861  memcpy(s->prev_lsps, lsps[2],
1862  s->lsps * sizeof(*s->prev_lsps));
1863  memcpy(s->synth_history, &synth[MAX_SFRAMESIZE],
1864  s->lsps * sizeof(*synth));
1865  memcpy(s->excitation_history, &excitation[MAX_SFRAMESIZE],
1866  s->history_nsamples * sizeof(*excitation));
1867  if (s->do_apf)
1868  memmove(s->zero_exc_pf, &s->zero_exc_pf[MAX_SFRAMESIZE],
1869  s->history_nsamples * sizeof(*s->zero_exc_pf));
1870 
1871  return 0;
1872 }
1873 
1881 static int parse_packet_header(WMAVoiceContext *s)
1882 {
1883  GetBitContext *gb = &s->gb;
1884  unsigned int res;
1885 
1886  if (get_bits_left(gb) < 11)
1887  return 1;
1888  skip_bits(gb, 4); // packet sequence number
1889  s->has_residual_lsps = get_bits1(gb);
1890  do {
1891  res = get_bits(gb, 6); // number of superframes per packet
1892  // (minus first one if there is spillover)
1893  if (get_bits_left(gb) < 6 * (res == 0x3F) + s->spillover_bitsize)
1894  return 1;
1895  } while (res == 0x3F);
1896  s->spillover_nbits = get_bits(gb, s->spillover_bitsize);
1897 
1898  return 0;
1899 }
1900 
1916 static void copy_bits(PutBitContext *pb,
1917  const uint8_t *data, int size,
1918  GetBitContext *gb, int nbits)
1919 {
1920  int rmn_bytes, rmn_bits;
1921 
1922  rmn_bits = rmn_bytes = get_bits_left(gb);
1923  if (rmn_bits < nbits)
1924  return;
1925  if (nbits > pb->size_in_bits - put_bits_count(pb))
1926  return;
1927  rmn_bits &= 7; rmn_bytes >>= 3;
1928  if ((rmn_bits = FFMIN(rmn_bits, nbits)) > 0)
1929  put_bits(pb, rmn_bits, get_bits(gb, rmn_bits));
1930  avpriv_copy_bits(pb, data + size - rmn_bytes,
1931  FFMIN(nbits - rmn_bits, rmn_bytes << 3));
1932 }
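/* Illustrative note, added for clarity (not part of the original source):
 * copy_bits() is what stitches a superframe back together when it straddles
 * an ASF packet boundary. A rough usage sketch under the same assumptions as
 * wmavoice_decode_packet() below; pkt_a/pkt_b, gb_a/gb_b and the bit counts
 * are hypothetical names:
 *
 *     // tail of packet A: cache the incomplete superframe bits
 *     init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE);
 *     copy_bits(&s->pb, pkt_a->data, pkt_a->size, gb_a, bits_left_in_a);
 *
 *     // head of packet B: append the spillover bits announced in its header
 *     copy_bits(&s->pb, pkt_b->data, pkt_b->size, gb_b, s->spillover_nbits);
 *     flush_put_bits(&s->pb);   // s->sframe_cache now holds the complete
 *                               // superframe bitstream for synth_superframe()
 */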
1933 
1945 static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
1946  int *got_frame_ptr, AVPacket *avpkt)
1947 {
1948  WMAVoiceContext *s = ctx->priv_data;
1949  GetBitContext *gb = &s->gb;
1950  int size, res, pos;
1951 
1952  /* Packets are sometimes a multiple of ctx->block_align, with a packet
1953  * header at each ctx->block_align bytes. However, Libav's ASF demuxer
1954  * feeds us ASF packets, which may concatenate multiple "codec" packets
1955  * in a single "muxer" packet, so we artificially emulate that by
1956  * capping the packet size at ctx->block_align. */
1957  for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
1958  if (!size) {
1959  *got_frame_ptr = 0;
1960  return 0;
1961  }
1962  init_get_bits(&s->gb, avpkt->data, size << 3);
1963 
1964  /* size == ctx->block_align is used to indicate whether we are dealing with
1965  * a new packet or a packet of which we already read the packet header
1966  * previously. */
1967  if (size == ctx->block_align) { // new packet header
1968  if ((res = parse_packet_header(s)) < 0)
1969  return res;
1970 
1971  /* If the packet header specifies a s->spillover_nbits, then we want
1972  * to push out all data of the previous packet (+ spillover) before
1973  * continuing to parse new superframes in the current packet. */
1974  if (s->spillover_nbits > 0) {
1975  if (s->sframe_cache_size > 0) {
1976  int cnt = get_bits_count(gb);
1977  copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
1978  flush_put_bits(&s->pb);
1979  s->sframe_cache_size += s->spillover_nbits;
1980  if ((res = synth_superframe(ctx, got_frame_ptr)) == 0 &&
1981  *got_frame_ptr) {
1982  cnt += s->spillover_nbits;
1983  s->skip_bits_next = cnt & 7;
1984  *(AVFrame *)data = s->frame;
1985  return cnt >> 3;
1986  } else
1987  skip_bits_long (gb, s->spillover_nbits - cnt +
1988  get_bits_count(gb)); // resync
1989  } else
1990  skip_bits_long(gb, s->spillover_nbits); // resync
1991  }
1992  } else if (s->skip_bits_next)
1993  skip_bits(gb, s->skip_bits_next);
1994 
1995  /* Try parsing superframes in current packet */
1996  s->sframe_cache_size = 0;
1997  s->skip_bits_next = 0;
1998  pos = get_bits_left(gb);
1999  if ((res = synth_superframe(ctx, got_frame_ptr)) < 0) {
2000  return res;
2001  } else if (*got_frame_ptr) {
2002  int cnt = get_bits_count(gb);
2003  s->skip_bits_next = cnt & 7;
2004  *(AVFrame *)data = s->frame;
2005  return cnt >> 3;
2006  } else if ((s->sframe_cache_size = pos) > 0) {
2007  /* rewind bit reader to start of last (incomplete) superframe... */
2008  init_get_bits(gb, avpkt->data, size << 3);
2009  skip_bits_long(gb, (size << 3) - pos);
2010  assert(get_bits_left(gb) == pos);
2011 
2012  /* ...and cache it for spillover in next packet */
2013  init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE);
2014  copy_bits(&s->pb, avpkt->data, size, gb, s->sframe_cache_size);
2015  // FIXME bad - just copy whole bytes and use the
2016  // skip_bits_next field
2017  }
2018 
2019  return size;
2020 }
2021 
2022 static av_cold int wmavoice_decode_end(AVCodecContext *ctx)
2023 {
2024  WMAVoiceContext *s = ctx->priv_data;
2025 
2026  if (s->do_apf) {
2027  ff_rdft_end(&s->rdft);
2028  ff_rdft_end(&s->irdft);
2029  ff_dct_end(&s->dct);
2030  ff_dct_end(&s->dst);
2031  }
2032 
2033  return 0;
2034 }
2035 
2036 static av_cold void wmavoice_flush(AVCodecContext *ctx)
2037 {
2038  WMAVoiceContext *s = ctx->priv_data;
2039  int n;
2040 
2041  s->postfilter_agc = 0;
2042  s->sframe_cache_size = 0;
2043  s->skip_bits_next = 0;
2044  for (n = 0; n < s->lsps; n++)
2045  s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);
2046  memset(s->excitation_history, 0,
2047  sizeof(*s->excitation_history) * MAX_SIGNAL_HISTORY);
2048  memset(s->synth_history, 0,
2049  sizeof(*s->synth_history) * MAX_LSPS);
2050  memset(s->gain_pred_err, 0,
2051  sizeof(s->gain_pred_err));
2052 
2053  if (s->do_apf) {
2054  memset(&s->synth_filter_out_buf[MAX_LSPS_ALIGN16 - s->lsps], 0,
2055  sizeof(*s->synth_filter_out_buf) * s->lsps);
2056  memset(s->dcf_mem, 0,
2057  sizeof(*s->dcf_mem) * 2);
2058  memset(s->zero_exc_pf, 0,
2059  sizeof(*s->zero_exc_pf) * s->history_nsamples);
2060  memset(s->denoise_filter_cache, 0, sizeof(s->denoise_filter_cache));
2061  }
2062 }
2063 
2064 AVCodec ff_wmavoice_decoder = {
2065  .name = "wmavoice",
2066  .type = AVMEDIA_TYPE_AUDIO,
2067  .id = CODEC_ID_WMAVOICE,
2068  .priv_data_size = sizeof(WMAVoiceContext),
2069  .init = wmavoice_decode_init,
2070  .close = wmavoice_decode_end,
2071  .decode = wmavoice_decode_packet,
2072  .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
2073  .flush = wmavoice_flush,
2074  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
2075 };