Libav
svq3.c
1 /*
2  * Copyright (c) 2003 The Libav Project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
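/*
 * Illustrative sketch (not part of the original source): one way a calling
 * application or demuxer might populate the fields described above. The
 * names image_desc and image_desc_size are hypothetical placeholders for the
 * ImageDescription atom as read from the stsd entry, including its leading
 * 4-byte length field. Guarded out so it has no effect on the build.
 */
#if 0
static int hypothetical_setup_svq3_extradata(AVCodecContext *avctx,
                                             const uint8_t *image_desc,
                                             int image_desc_size)
{
    /* Skip the 4-byte atom length so extradata starts at the 'SVQ3' fourcc. */
    if (image_desc_size <= 4)
        return AVERROR_INVALIDDATA;
    avctx->extradata_size = image_desc_size - 4;
    avctx->extradata      = av_mallocz(avctx->extradata_size +
                                       FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    memcpy(avctx->extradata, image_desc + 4, avctx->extradata_size);
    return 0;
}
#endif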
42 
43 #include "libavutil/attributes.h"
44 #include "internal.h"
45 #include "avcodec.h"
46 #include "mpegvideo.h"
47 #include "h264.h"
48 
49 #include "h264data.h" // FIXME FIXME FIXME
50 
51 #include "h264_mvpred.h"
52 #include "golomb.h"
53 #include "hpeldsp.h"
54 #include "rectangle.h"
55 
56 #if CONFIG_ZLIB
57 #include <zlib.h>
58 #endif
59 
60 #include "svq1.h"
61 #include "svq3.h"
62 
68 typedef struct {
69  H264Context h;
70  HpelDSPContext hdsp;
71  Picture *cur_pic;
72  Picture *next_pic;
73  Picture *last_pic;
74  int halfpel_flag;
75  int thirdpel_flag;
76  int unknown_flag;
77  int next_slice_index;
78  uint32_t watermark_key;
79  int adaptive_quant;
80  int next_p_frame_damaged;
81  int h_edge_pos;
82  int v_edge_pos;
83  int last_frame_output;
84 } SVQ3Context;
85 
86 #define FULLPEL_MODE 1
87 #define HALFPEL_MODE 2
88 #define THIRDPEL_MODE 3
89 #define PREDICT_MODE 4
90 
91 /* dual scan (from some older h264 draft)
92  *  o-->o-->o   o
93  *         |  /|
94  *  o   o  o / o
95  *  | / |  |/  |
96  *  o   o  o   o
97  *    /
98  *  o-->o-->o-->o
99  */
100 static const uint8_t svq3_scan[16] = {
101  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
102  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
103  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
104  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
105 };
106 
107 static const uint8_t luma_dc_zigzag_scan[16] = {
108  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
109  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
110  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
111  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
112 };
113 
114 static const uint8_t svq3_pred_0[25][2] = {
115  { 0, 0 },
116  { 1, 0 }, { 0, 1 },
117  { 0, 2 }, { 1, 1 }, { 2, 0 },
118  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
119  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
120  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
121  { 2, 4 }, { 3, 3 }, { 4, 2 },
122  { 4, 3 }, { 3, 4 },
123  { 4, 4 }
124 };
125 
126 static const int8_t svq3_pred_1[6][6][5] = {
127  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
128  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
129  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
130  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
131  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
132  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
133  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
134  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
135  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
136  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
137  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
138  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
139 };
140 
141 static const struct {
142  uint8_t run;
143  uint8_t level;
144 } svq3_dct_tables[2][16] = {
145  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
146  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
147  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
148  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
149 };
150 
151 static const uint32_t svq3_dequant_coeff[32] = {
152  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
153  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
154  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
155  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
156 };
157 
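/*
 * Added commentary (not in the original source): svq3_dequant_coeff roughly
 * doubles every 6 qp steps, and the IDCT helpers below apply it in fixed
 * point: (x * qmul + 0x80000) >> 20 is x * qmul / 2^20 rounded to nearest,
 * since 0x80000 is 2^19. The 13/17/7 multipliers form SVQ3's integer
 * butterflies approximating a scaled 4-point DCT, analogous to the H.264
 * 4x4 transform but with different constants.
 */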
158 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
159 {
160  const int qmul = svq3_dequant_coeff[qp];
161 #define stride 16
162  int i;
163  int temp[16];
164  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
165 
166  for (i = 0; i < 4; i++) {
167  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
168  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
169  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
170  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
171 
172  temp[4 * i + 0] = z0 + z3;
173  temp[4 * i + 1] = z1 + z2;
174  temp[4 * i + 2] = z1 - z2;
175  temp[4 * i + 3] = z0 - z3;
176  }
177 
178  for (i = 0; i < 4; i++) {
179  const int offset = x_offset[i];
180  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
181  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
182  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
183  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
184 
185  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
186  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
187  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
188  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
189  }
190 }
191 #undef stride
192 
193 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
194  int stride, int qp, int dc)
195 {
196  const int qmul = svq3_dequant_coeff[qp];
197  int i;
198 
199  if (dc) {
200  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
201  : qmul * (block[0] >> 3) / 2);
202  block[0] = 0;
203  }
204 
205  for (i = 0; i < 4; i++) {
206  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
207  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
208  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
209  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
210 
211  block[0 + 4 * i] = z0 + z3;
212  block[1 + 4 * i] = z1 + z2;
213  block[2 + 4 * i] = z1 - z2;
214  block[3 + 4 * i] = z0 - z3;
215  }
216 
217  for (i = 0; i < 4; i++) {
218  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
219  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
220  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
221  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
222  const int rr = (dc + 0x80000);
223 
224  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
225  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
226  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
227  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
228  }
229 
230  memset(block, 0, 16 * sizeof(int16_t));
231 }
232 
233 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
234  int index, const int type)
235 {
236  static const uint8_t *const scan_patterns[4] =
237  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
238 
239  int run, level, limit;
240  unsigned vlc;
241  const int intra = 3 * type >> 2;
242  const uint8_t *const scan = scan_patterns[type];
243 
244  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
245  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
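 /* Added commentary: odd ue(v) codes signal positive levels and even codes
  * negative ones; sign is 0 or -1, so (level ^ sign) - sign applies the
  * sign without a branch, and (vlc + 1) >> 1 folds the sign bit away. */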
246  int sign = (vlc & 1) ? 0 : -1;
247  vlc = vlc + 1 >> 1;
248 
249  if (type == 3) {
250  if (vlc < 3) {
251  run = 0;
252  level = vlc;
253  } else if (vlc < 4) {
254  run = 1;
255  level = 1;
256  } else {
257  run = vlc & 0x3;
258  level = (vlc + 9 >> 2) - run;
259  }
260  } else {
261  if (vlc < 16) {
262  run = svq3_dct_tables[intra][vlc].run;
263  level = svq3_dct_tables[intra][vlc].level;
264  } else if (intra) {
265  run = vlc & 0x7;
266  level = (vlc >> 3) +
267  ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
268  } else {
269  run = vlc & 0xF;
270  level = (vlc >> 4) +
271  ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
272  }
273  }
274 
275  if ((index += run) >= limit)
276  return -1;
277 
278  block[scan[index]] = (level ^ sign) - sign;
279  }
280 
281  if (type != 2) {
282  break;
283  }
284  }
285 
286  return 0;
287 }
288 
289 static inline void svq3_mc_dir_part(SVQ3Context *s,
290  int x, int y, int width, int height,
291  int mx, int my, int dxy,
292  int thirdpel, int dir, int avg)
293 {
294  H264Context *h = &s->h;
295  const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
296  uint8_t *src, *dest;
297  int i, emu = 0;
298  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
299 
300  mx += x;
301  my += y;
302 
303  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
304  my < 0 || my >= s->v_edge_pos - height - 1) {
305  emu = 1;
306  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
307  my = av_clip(my, -16, s->v_edge_pos - height + 15);
308  }
309 
310  /* form component predictions */
311  dest = h->cur_pic.f.data[0] + x + y * h->linesize;
312  src = pic->f.data[0] + mx + my * h->linesize;
313 
314  if (emu) {
315  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
316  h->linesize, h->linesize,
317  width + 1, height + 1,
318  mx, my, s->h_edge_pos, s->v_edge_pos);
319  src = h->edge_emu_buffer;
320  }
321  if (thirdpel)
322  (avg ? h->dsp.avg_tpel_pixels_tab
323  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
324  width, height);
325  else
326  (avg ? s->hdsp.avg_pixels_tab
327  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
328  height);
329 
330  if (!(h->flags & CODEC_FLAG_GRAY)) {
331  mx = mx + (mx < (int) x) >> 1;
332  my = my + (my < (int) y) >> 1;
333  width = width >> 1;
334  height = height >> 1;
335  blocksize++;
336 
337  for (i = 1; i < 3; i++) {
338  dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
339  src = pic->f.data[i] + mx + my * h->uvlinesize;
340 
341  if (emu) {
342  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
343  h->uvlinesize, h->uvlinesize,
344  width + 1, height + 1,
345  mx, my, (s->h_edge_pos >> 1),
346  s->v_edge_pos >> 1);
347  src = h->edge_emu_buffer;
348  }
349  if (thirdpel)
350  (avg ? h->dsp.avg_tpel_pixels_tab
351  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
352  h->uvlinesize,
353  width, height);
354  else
355  (avg ? s->hdsp.avg_pixels_tab
356  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
357  h->uvlinesize,
358  height);
359  }
360  }
361 }
362 
363 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
364  int dir, int avg)
365 {
366  int i, j, k, mx, my, dx, dy, x, y;
367  H264Context *h = &s->h;
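 /* Added commentary: for size = mb_type - 1 in 0..6 these expressions give
  * the partition dimensions 16x16, 8x16, 16x8, 8x8, 4x8, 8x4 and 4x4 in
  * that order, so the loops below visit every partition of the macroblock. */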
368  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
369  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
370  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
371  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
372  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
373 
374  for (i = 0; i < 16; i += part_height)
375  for (j = 0; j < 16; j += part_width) {
376  const int b_xy = (4 * h->mb_x + (j >> 2)) +
377  (4 * h->mb_y + (i >> 2)) * h->b_stride;
378  int dxy;
379  x = 16 * h->mb_x + j;
380  y = 16 * h->mb_y + i;
381  k = (j >> 2 & 1) + (i >> 1 & 2) +
382  (j >> 1 & 4) + (i & 8);
383 
384  if (mode != PREDICT_MODE) {
385  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
386  } else {
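 /* Added commentary: PREDICT_MODE reuses the co-located motion vector from
  * next_pic and scales it by the ratio of temporal distances given by
  * frame_num_offset and prev_frame_num_offset, much like temporal direct
  * prediction; the + 1 >> 1 terms round the scaled values. */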
387  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
388  my = s->next_pic->motion_val[0][b_xy][1] << 1;
389 
390  if (dir == 0) {
391  mx = mx * h->frame_num_offset /
392  h->prev_frame_num_offset + 1 >> 1;
393  my = my * h->frame_num_offset /
394  h->prev_frame_num_offset + 1 >> 1;
395  } else {
396  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
397  h->prev_frame_num_offset + 1 >> 1;
398  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
399  h->prev_frame_num_offset + 1 >> 1;
400  }
401  }
402 
403  /* clip motion vector prediction to frame border */
404  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
405  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
406 
407  /* get (optional) motion vector differential */
408  if (mode == PREDICT_MODE) {
409  dx = dy = 0;
410  } else {
411  dy = svq3_get_se_golomb(&h->gb);
412  dx = svq3_get_se_golomb(&h->gb);
413 
414  if (dx == INVALID_VLC || dy == INVALID_VLC) {
415  av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
416  return -1;
417  }
418  }
419 
420  /* compute motion vector */
421  if (mode == THIRDPEL_MODE) {
422  int fx, fy;
423  mx = (mx + 1 >> 1) + dx;
424  my = (my + 1 >> 1) + dy;
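 /* Added commentary: the +0x3000 bias before the unsigned division by 3
  * makes the quotient equal floor(mx / 3) for the negative values that can
  * occur here, and the -0x1000 removes the bias again; fx/fy are therefore
  * the full-pel part, and (mx - 3 * fx) + 4 * (my - 3 * fy) selects one of
  * the nine third-pel positions as dxy. */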
425  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
426  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
427  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
428 
429  svq3_mc_dir_part(s, x, y, part_width, part_height,
430  fx, fy, dxy, 1, dir, avg);
431  mx += mx;
432  my += my;
433  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
434  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
435  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
436  dxy = (mx & 1) + 2 * (my & 1);
437 
438  svq3_mc_dir_part(s, x, y, part_width, part_height,
439  mx >> 1, my >> 1, dxy, 0, dir, avg);
440  mx *= 3;
441  my *= 3;
442  } else {
443  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
444  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
445 
446  svq3_mc_dir_part(s, x, y, part_width, part_height,
447  mx, my, 0, 0, dir, avg);
448  mx *= 6;
449  my *= 6;
450  }
451 
452  /* update mv_cache */
453  if (mode != PREDICT_MODE) {
454  int32_t mv = pack16to32(mx, my);
455 
456  if (part_height == 8 && i < 8) {
457  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
458 
459  if (part_width == 8 && j < 8)
460  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
461  }
462  if (part_width == 8 && j < 8)
463  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
464  if (part_width == 4 || part_height == 4)
465  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
466  }
467 
468  /* write back motion vectors */
469  fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
470  part_width >> 2, part_height >> 2, h->b_stride,
471  pack16to32(mx, my), 4);
472  }
473 
474  return 0;
475 }
476 
477 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
478 {
479  H264Context *h = &s->h;
480  int i, j, k, m, dir, mode;
481  int cbp = 0;
482  uint32_t vlc;
483  int8_t *top, *left;
484  const int mb_xy = h->mb_xy;
485  const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
486 
487  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
488  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
489  h->topright_samples_available = 0xFFFF;
490 
491  if (mb_type == 0) { /* SKIP */
492  if (h->pict_type == AV_PICTURE_TYPE_P ||
493  s->next_pic->mb_type[mb_xy] == -1) {
494  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
495  0, 0, 0, 0, 0, 0);
496 
497  if (h->pict_type == AV_PICTURE_TYPE_B)
498  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
499  0, 0, 0, 0, 1, 1);
500 
501  mb_type = MB_TYPE_SKIP;
502  } else {
503  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
504  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
505  return -1;
506  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
507  return -1;
508 
509  mb_type = MB_TYPE_16x16;
510  }
511  } else if (mb_type < 8) { /* INTER */
512  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
513  mode = THIRDPEL_MODE;
514  else if (s->halfpel_flag &&
515  s->thirdpel_flag == !get_bits1(&h->gb))
516  mode = HALFPEL_MODE;
517  else
518  mode = FULLPEL_MODE;
519 
520  /* fill caches */
521  /* note ref_cache should contain here:
522  * ????????
523  * ???11111
524  * N??11111
525  * N??11111
526  * N??11111
527  */
528 
529  for (m = 0; m < 2; m++) {
530  if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
531  for (i = 0; i < 4; i++)
532  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
533  h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
534  } else {
535  for (i = 0; i < 4; i++)
536  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
537  }
538  if (h->mb_y > 0) {
539  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
540  h->cur_pic.motion_val[m][b_xy - h->b_stride],
541  4 * 2 * sizeof(int16_t));
542  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
543  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
544 
545  if (h->mb_x < h->mb_width - 1) {
546  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
547  h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
548  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
549  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
550  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
551  } else
552  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
553  if (h->mb_x > 0) {
554  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
555  h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
556  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
557  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
558  } else
559  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
560  } else
561  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
562  PART_NOT_AVAILABLE, 8);
563 
564  if (h->pict_type != AV_PICTURE_TYPE_B)
565  break;
566  }
567 
568  /* decode motion vector(s) and form prediction(s) */
569  if (h->pict_type == AV_PICTURE_TYPE_P) {
570  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
571  return -1;
572  } else { /* AV_PICTURE_TYPE_B */
573  if (mb_type != 2) {
574  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
575  return -1;
576  } else {
577  for (i = 0; i < 4; i++)
578  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
579  0, 4 * 2 * sizeof(int16_t));
580  }
581  if (mb_type != 1) {
582  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
583  return -1;
584  } else {
585  for (i = 0; i < 4; i++)
586  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
587  0, 4 * 2 * sizeof(int16_t));
588  }
589  }
590 
591  mb_type = MB_TYPE_16x16;
592  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
593  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
594 
595  if (mb_type == 8) {
596  if (h->mb_x > 0) {
597  for (i = 0; i < 4; i++)
598  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
599  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
600  h->left_samples_available = 0x5F5F;
601  }
602  if (h->mb_y > 0) {
603  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
604  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
605  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
606  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
607 
608  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
609  h->top_samples_available = 0x33FF;
610  }
611 
612  /* decode prediction codes for luma blocks */
613  for (i = 0; i < 16; i += 2) {
614  vlc = svq3_get_ue_golomb(&h->gb);
615 
616  if (vlc >= 25) {
617  av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
618  return -1;
619  }
620 
621  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
622  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
623 
624  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
625  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
626 
627  if (left[1] == -1 || left[2] == -1) {
628  av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
629  return -1;
630  }
631  }
632  } else { /* mb_type == 33, DC_128_PRED block type */
633  for (i = 0; i < 4; i++)
634  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
635  }
636 
637  write_back_intra_pred_mode(h);
638 
639  if (mb_type == 8) {
640  ff_h264_check_intra4x4_pred_mode(h);
641 
642  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
643  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
644  } else {
645  for (i = 0; i < 4; i++)
646  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
647 
648  h->top_samples_available = 0x33FF;
649  h->left_samples_available = 0x5F5F;
650  }
651 
652  mb_type = MB_TYPE_INTRA4x4;
653  } else { /* INTRA16x16 */
654  dir = i_mb_type_info[mb_type - 8].pred_mode;
655  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
656 
657  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
658  av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
659  return h->intra16x16_pred_mode;
660  }
661 
662  cbp = i_mb_type_info[mb_type - 8].cbp;
663  mb_type = MB_TYPE_INTRA16x16;
664  }
665 
666  if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
667  for (i = 0; i < 4; i++)
668  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
669  0, 4 * 2 * sizeof(int16_t));
670  if (h->pict_type == AV_PICTURE_TYPE_B) {
671  for (i = 0; i < 4; i++)
672  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
673  0, 4 * 2 * sizeof(int16_t));
674  }
675  }
676  if (!IS_INTRA4x4(mb_type)) {
677  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
678  }
679  if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
680  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
681  }
682 
683  if (!IS_INTRA16x16(mb_type) &&
684  (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
685  if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
686  av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
687  return -1;
688  }
689 
690  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
691  : golomb_to_inter_cbp[vlc];
692  }
693  if (IS_INTRA16x16(mb_type) ||
694  (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
695  h->qscale += svq3_get_se_golomb(&h->gb);
696 
697  if (h->qscale > 31u) {
698  av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
699  return -1;
700  }
701  }
702  if (IS_INTRA16x16(mb_type)) {
703  AV_ZERO128(h->mb_luma_dc[0] + 0);
704  AV_ZERO128(h->mb_luma_dc[0] + 8);
705  if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
706  av_log(h->avctx, AV_LOG_ERROR,
707  "error while decoding intra luma dc\n");
708  return -1;
709  }
710  }
711 
712  if (cbp) {
713  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
714  const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
715 
716  for (i = 0; i < 4; i++)
717  if ((cbp & (1 << i))) {
718  for (j = 0; j < 4; j++) {
719  k = index ? (1 * (j & 1) + 2 * (i & 1) +
720  2 * (j & 2) + 4 * (i & 2))
721  : (4 * i + j);
722  h->non_zero_count_cache[scan8[k]] = 1;
723 
724  if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
725  av_log(h->avctx, AV_LOG_ERROR,
726  "error while decoding block\n");
727  return -1;
728  }
729  }
730  }
731 
732  if ((cbp & 0x30)) {
733  for (i = 1; i < 3; ++i)
734  if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
735  av_log(h->avctx, AV_LOG_ERROR,
736  "error while decoding chroma dc block\n");
737  return -1;
738  }
739 
740  if ((cbp & 0x20)) {
741  for (i = 1; i < 3; i++) {
742  for (j = 0; j < 4; j++) {
743  k = 16 * i + j;
744  h->non_zero_count_cache[scan8[k]] = 1;
745 
746  if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
747  av_log(h->avctx, AV_LOG_ERROR,
748  "error while decoding chroma ac block\n");
749  return -1;
750  }
751  }
752  }
753  }
754  }
755  }
756 
757  h->cbp = cbp;
758  h->cur_pic.mb_type[mb_xy] = mb_type;
759 
760  if (IS_INTRA(mb_type))
761  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
762 
763  return 0;
764 }
765 
766 static int svq3_decode_slice_header(AVCodecContext *avctx)
767 {
768  SVQ3Context *s = avctx->priv_data;
769  H264Context *h = &s->h;
770  const int mb_xy = h->mb_xy;
771  int i, header;
772  unsigned slice_id;
773 
774  header = get_bits(&h->gb, 8);
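 /* Added commentary: bits 5-6 of this byte give the number of bytes in the
  * next-slice length field parsed below and must be nonzero, while the
  * remaining bits (masked with 0x9F) must form value 1 or 2, selecting one
  * of the two slice header layouts handled further down. */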
775 
776  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
777  /* TODO: what? */
778  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
779  return -1;
780  } else {
781  int length = header >> 5 & 3;
782 
783  s->next_slice_index = get_bits_count(&h->gb) +
784  8 * show_bits(&h->gb, 8 * length) +
785  8 * length;
786 
787  if (s->next_slice_index > h->gb.size_in_bits) {
788  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
789  return -1;
790  }
791 
792  h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
793  skip_bits(&h->gb, 8);
794 
795  if (s->watermark_key) {
796  uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
797  AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
798  header ^ s->watermark_key);
799  }
800  if (length > 0) {
801  memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
802  &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
803  }
804  skip_bits_long(&h->gb, 0);
805  }
806 
807  if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
808  av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
809  return -1;
810  }
811 
812  h->slice_type = golomb_to_pict_type[slice_id];
813 
814  if ((header & 0x9F) == 2) {
815  i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
816  h->mb_skip_run = get_bits(&h->gb, i) -
817  (h->mb_y * h->mb_width + h->mb_x);
818  } else {
819  skip_bits1(&h->gb);
820  h->mb_skip_run = 0;
821  }
822 
823  h->slice_num = get_bits(&h->gb, 8);
824  h->qscale = get_bits(&h->gb, 5);
825  s->adaptive_quant = get_bits1(&h->gb);
826 
827  /* unknown fields */
828  skip_bits1(&h->gb);
829 
830  if (s->unknown_flag)
831  skip_bits1(&h->gb);
832 
833  skip_bits1(&h->gb);
834  skip_bits(&h->gb, 2);
835 
836  while (get_bits1(&h->gb))
837  skip_bits(&h->gb, 8);
838 
839  /* reset intra predictors and invalidate motion vector references */
840  if (h->mb_x > 0) {
841  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
842  -1, 4 * sizeof(int8_t));
843  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
844  -1, 8 * sizeof(int8_t) * h->mb_x);
845  }
846  if (h->mb_y > 0) {
847  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
848  -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
849 
850  if (h->mb_x > 0)
851  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
852  }
853 
854  return 0;
855 }
856 
857 static av_cold int svq3_decode_init(AVCodecContext *avctx)
858 {
859  SVQ3Context *s = avctx->priv_data;
860  H264Context *h = &s->h;
861  int m;
862  unsigned char *extradata;
863  unsigned char *extradata_end;
864  unsigned int size;
865  int marker_found = 0;
866 
867  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
868  s->last_pic = av_mallocz(sizeof(*s->last_pic));
869  s->next_pic = av_mallocz(sizeof(*s->next_pic));
870  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
871  av_freep(&s->cur_pic);
872  av_freep(&s->last_pic);
873  av_freep(&s->next_pic);
874  return AVERROR(ENOMEM);
875  }
876 
877  if (ff_h264_decode_init(avctx) < 0)
878  return -1;
879 
880  ff_hpeldsp_init(&s->hdsp, avctx->flags);
881  h->flags = avctx->flags;
882  h->is_complex = 1;
883  h->picture_structure = PICT_FRAME;
884  avctx->pix_fmt = avctx->codec->pix_fmts[0];
885 
886  h->chroma_qp[0] = h->chroma_qp[1] = 4;
887  h->chroma_x_shift = h->chroma_y_shift = 1;
888 
889  s->halfpel_flag = 1;
890  s->thirdpel_flag = 1;
891  s->unknown_flag = 0;
892 
893  /* prowl for the "SEQH" marker in the extradata */
894  extradata = (unsigned char *)avctx->extradata;
895  extradata_end = avctx->extradata + avctx->extradata_size;
896  if (extradata) {
897  for (m = 0; m + 8 < avctx->extradata_size; m++) {
898  if (!memcmp(extradata, "SEQH", 4)) {
899  marker_found = 1;
900  break;
901  }
902  extradata++;
903  }
904  }
905 
906  /* if a match was found, parse the extra data */
907  if (marker_found) {
908  GetBitContext gb;
909  int frame_size_code;
910 
911  size = AV_RB32(&extradata[4]);
912  if (size > extradata_end - extradata - 8)
913  return AVERROR_INVALIDDATA;
914  init_get_bits(&gb, extradata + 8, size * 8);
915 
916  /* 'frame size code' and optional 'width, height' */
917  frame_size_code = get_bits(&gb, 3);
918  switch (frame_size_code) {
919  case 0:
920  avctx->width = 160;
921  avctx->height = 120;
922  break;
923  case 1:
924  avctx->width = 128;
925  avctx->height = 96;
926  break;
927  case 2:
928  avctx->width = 176;
929  avctx->height = 144;
930  break;
931  case 3:
932  avctx->width = 352;
933  avctx->height = 288;
934  break;
935  case 4:
936  avctx->width = 704;
937  avctx->height = 576;
938  break;
939  case 5:
940  avctx->width = 240;
941  avctx->height = 180;
942  break;
943  case 6:
944  avctx->width = 320;
945  avctx->height = 240;
946  break;
947  case 7:
948  avctx->width = get_bits(&gb, 12);
949  avctx->height = get_bits(&gb, 12);
950  break;
951  }
952 
953  s->halfpel_flag = get_bits1(&gb);
954  s->thirdpel_flag = get_bits1(&gb);
955 
956  /* unknown fields */
957  skip_bits1(&gb);
958  skip_bits1(&gb);
959  skip_bits1(&gb);
960  skip_bits1(&gb);
961 
962  h->low_delay = get_bits1(&gb);
963 
964  /* unknown field */
965  skip_bits1(&gb);
966 
967  while (get_bits1(&gb))
968  skip_bits(&gb, 8);
969 
970  s->unknown_flag = get_bits1(&gb);
971  avctx->has_b_frames = !h->low_delay;
972  if (s->unknown_flag) {
973 #if CONFIG_ZLIB
974  unsigned watermark_width = svq3_get_ue_golomb(&gb);
975  unsigned watermark_height = svq3_get_ue_golomb(&gb);
976  int u1 = svq3_get_ue_golomb(&gb);
977  int u2 = get_bits(&gb, 8);
978  int u3 = get_bits(&gb, 2);
979  int u4 = svq3_get_ue_golomb(&gb);
980  unsigned long buf_len = watermark_width *
981  watermark_height * 4;
982  int offset = get_bits_count(&gb) + 7 >> 3;
983  uint8_t *buf;
984 
985  if (watermark_height > 0 &&
986  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
987  return -1;
988 
989  buf = av_malloc(buf_len);
990  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
991  watermark_width, watermark_height);
992  av_log(avctx, AV_LOG_DEBUG,
993  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
994  u1, u2, u3, u4, offset);
995  if (uncompress(buf, &buf_len, extradata + 8 + offset,
996  size - offset) != Z_OK) {
997  av_log(avctx, AV_LOG_ERROR,
998  "could not uncompress watermark logo\n");
999  av_free(buf);
1000  return -1;
1001  }
1002  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1003  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1004  av_log(avctx, AV_LOG_DEBUG,
1005  "watermark key %#x\n", s->watermark_key);
1006  av_free(buf);
1007 #else
1008  av_log(avctx, AV_LOG_ERROR,
1009  "this svq3 file contains watermark which need zlib support compiled in\n");
1010  return -1;
1011 #endif
1012  }
1013  }
1014 
1015  h->width = avctx->width;
1016  h->height = avctx->height;
1017  h->mb_width = (h->width + 15) / 16;
1018  h->mb_height = (h->height + 15) / 16;
1019  h->mb_stride = h->mb_width + 1;
1020  h->mb_num = h->mb_width * h->mb_height;
1021  h->b_stride = 4 * h->mb_width;
1022  s->h_edge_pos = h->mb_width * 16;
1023  s->v_edge_pos = h->mb_height * 16;
1024 
1025  if (ff_h264_alloc_tables(h) < 0) {
1026  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1027  return AVERROR(ENOMEM);
1028  }
1029 
1030  return 0;
1031 }
1032 
1033 static void free_picture(AVCodecContext *avctx, Picture *pic)
1034 {
1035  int i;
1036  for (i = 0; i < 2; i++) {
1037  av_buffer_unref(&pic->motion_val_buf[i]);
1038  av_buffer_unref(&pic->ref_index_buf[i]);
1039  }
1040  av_buffer_unref(&pic->mb_type_buf);
1041 
1042  av_frame_unref(&pic->f);
1043 }
1044 
1045 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1046 {
1047  SVQ3Context *s = avctx->priv_data;
1048  H264Context *h = &s->h;
1049  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1050  const int mb_array_size = h->mb_stride * h->mb_height;
1051  const int b4_stride = h->mb_width * 4 + 1;
1052  const int b4_array_size = b4_stride * h->mb_height * 4;
1053  int ret;
1054 
1055  if (!pic->motion_val_buf[0]) {
1056  int i;
1057 
1058  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1059  if (!pic->mb_type_buf)
1060  return AVERROR(ENOMEM);
1061  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1062 
1063  for (i = 0; i < 2; i++) {
1064  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1065  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1066  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1067  ret = AVERROR(ENOMEM);
1068  goto fail;
1069  }
1070 
1071  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1072  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1073  }
1074  }
1075  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1076 
1077  ret = ff_get_buffer(avctx, &pic->f,
1078  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1079  if (ret < 0)
1080  goto fail;
1081 
1082  if (!h->edge_emu_buffer) {
1083  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1084  if (!h->edge_emu_buffer)
1085  return AVERROR(ENOMEM);
1086  }
1087 
1088  h->linesize = pic->f.linesize[0];
1089  h->uvlinesize = pic->f.linesize[1];
1090 
1091  return 0;
1092 fail:
1093  free_picture(avctx, pic);
1094  return ret;
1095 }
1096 
1097 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1098  int *got_frame, AVPacket *avpkt)
1099 {
1100  const uint8_t *buf = avpkt->data;
1101  SVQ3Context *s = avctx->priv_data;
1102  H264Context *h = &s->h;
1103  int buf_size = avpkt->size;
1104  int ret, m, i;
1105 
1106  /* special case for last picture */
1107  if (buf_size == 0) {
1108  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1109  ret = av_frame_ref(data, &s->next_pic->f);
1110  if (ret < 0)
1111  return ret;
1112  s->last_frame_output = 1;
1113  *got_frame = 1;
1114  }
1115  return 0;
1116  }
1117 
1118  init_get_bits(&h->gb, buf, 8 * buf_size);
1119 
1120  h->mb_x = h->mb_y = h->mb_xy = 0;
1121 
1122  if (svq3_decode_slice_header(avctx))
1123  return -1;
1124 
1125  h->pict_type = h->slice_type;
1126 
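 /* Added commentary: reference bookkeeping. For non-B frames the most
  * recently decoded picture (left in next_pic by the swap at the end of
  * this function) becomes last_pic here, so last_pic and next_pic act as
  * the backward and forward references used for B-frames and for
  * PREDICT_MODE motion scaling. */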
1127  if (h->pict_type != AV_PICTURE_TYPE_B)
1128  FFSWAP(Picture*, s->next_pic, s->last_pic);
1129 
1130  av_frame_unref(&s->cur_pic->f);
1131 
1132  /* for skipping the frame */
1133  s->cur_pic->f.pict_type = h->pict_type;
1134  s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1135 
1136  ret = get_buffer(avctx, s->cur_pic);
1137  if (ret < 0)
1138  return ret;
1139 
1140  h->cur_pic_ptr = s->cur_pic;
1141  av_frame_unref(&h->cur_pic.f);
1142  h->cur_pic = *s->cur_pic;
1143  ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1144  if (ret < 0)
1145  return ret;
1146 
1147  for (i = 0; i < 16; i++) {
1148  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1149  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1150  }
1151  for (i = 0; i < 16; i++) {
1152  h->block_offset[16 + i] =
1153  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1154  h->block_offset[48 + 16 + i] =
1155  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1156  }
1157 
1158  if (h->pict_type != AV_PICTURE_TYPE_I) {
1159  if (!s->last_pic->f.data[0]) {
1160  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1161  ret = get_buffer(avctx, s->last_pic);
1162  if (ret < 0)
1163  return ret;
1164  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1165  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1166  s->last_pic->f.linesize[1]);
1167  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1168  s->last_pic->f.linesize[2]);
1169  }
1170 
1171  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1172  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1173  ret = get_buffer(avctx, s->next_pic);
1174  if (ret < 0)
1175  return ret;
1176  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1177  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1178  s->next_pic->f.linesize[1]);
1179  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1180  s->next_pic->f.linesize[2]);
1181  }
1182  }
1183 
1184  if (avctx->debug & FF_DEBUG_PICT_INFO)
1185  av_log(h->avctx, AV_LOG_DEBUG,
1186  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1187  av_get_picture_type_char(h->pict_type),
1188  s->halfpel_flag, s->thirdpel_flag,
1189  s->adaptive_quant, h->qscale, h->slice_num);
1190 
1191  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1192  avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1193  avctx->skip_frame >= AVDISCARD_ALL)
1194  return 0;
1195 
1196  if (s->next_p_frame_damaged) {
1197  if (h->pict_type == AV_PICTURE_TYPE_B)
1198  return 0;
1199  else
1200  s->next_p_frame_damaged = 0;
1201  }
1202 
1203  if (h->pict_type == AV_PICTURE_TYPE_B) {
1204  h->frame_num_offset = h->slice_num - h->prev_frame_num;
1205 
1206  if (h->frame_num_offset < 0)
1207  h->frame_num_offset += 256;
1208  if (h->frame_num_offset == 0 ||
1209  h->frame_num_offset >= h->prev_frame_num_offset) {
1210  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1211  return -1;
1212  }
1213  } else {
1214  h->prev_frame_num = h->frame_num;
1215  h->frame_num = h->slice_num;
1216  h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1217 
1218  if (h->prev_frame_num_offset < 0)
1219  h->prev_frame_num_offset += 256;
1220  }
1221 
1222  for (m = 0; m < 2; m++) {
1223  int i;
1224  for (i = 0; i < 4; i++) {
1225  int j;
1226  for (j = -1; j < 4; j++)
1227  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1228  if (i < 3)
1229  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1230  }
1231  }
1232 
1233  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1234  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1235  unsigned mb_type;
1236  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1237 
1238  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1239  ((get_bits_count(&h->gb) & 7) == 0 ||
1240  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1241  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1242  h->gb.size_in_bits = 8 * buf_size;
1243 
1244  if (svq3_decode_slice_header(avctx))
1245  return -1;
1246 
1247  /* TODO: support s->mb_skip_run */
1248  }
1249 
1250  mb_type = svq3_get_ue_golomb(&h->gb);
1251 
1252  if (h->pict_type == AV_PICTURE_TYPE_I)
1253  mb_type += 8;
1254  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1255  mb_type += 4;
1256  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1257  av_log(h->avctx, AV_LOG_ERROR,
1258  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1259  return -1;
1260  }
1261 
1262  if (mb_type != 0)
1263  ff_h264_hl_decode_mb(h);
1264 
1265  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1266  h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1267  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1268  }
1269 
1270  ff_draw_horiz_band(avctx, s->cur_pic,
1271  s->last_pic->f.data[0] ? s->last_pic : NULL,
1272  16 * h->mb_y, 16, h->picture_structure, 0,
1273  h->low_delay);
1274  }
1275 
1276  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1277  ret = av_frame_ref(data, &s->cur_pic->f);
1278  else if (s->last_pic->f.data[0])
1279  ret = av_frame_ref(data, &s->last_pic->f);
1280  if (ret < 0)
1281  return ret;
1282 
1283  /* Do not output the last pic after seeking. */
1284  if (s->last_pic->f.data[0] || h->low_delay)
1285  *got_frame = 1;
1286 
1287  if (h->pict_type != AV_PICTURE_TYPE_B) {
1288  FFSWAP(Picture*, s->cur_pic, s->next_pic);
1289  } else {
1290  av_frame_unref(&s->cur_pic->f);
1291  }
1292 
1293  return buf_size;
1294 }
1295 
1296 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1297 {
1298  SVQ3Context *s = avctx->priv_data;
1299  H264Context *h = &s->h;
1300 
1301  free_picture(avctx, s->cur_pic);
1302  free_picture(avctx, s->next_pic);
1303  free_picture(avctx, s->last_pic);
1304  av_freep(&s->cur_pic);
1305  av_freep(&s->next_pic);
1306  av_freep(&s->last_pic);
1307 
1308  av_frame_unref(&h->cur_pic.f);
1309 
1310  ff_h264_free_context(h);
1311 
1312  return 0;
1313 }
1314 
1315 AVCodec ff_svq3_decoder = {
1316  .name = "svq3",
1317  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1318  .type = AVMEDIA_TYPE_VIDEO,
1319  .id = AV_CODEC_ID_SVQ3,
1320  .priv_data_size = sizeof(SVQ3Context),
1321  .init = svq3_decode_init,
1322  .close = svq3_decode_end,
1323  .decode = svq3_decode_frame,
1324  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1325  CODEC_CAP_DR1 |
1326  CODEC_CAP_DELAY,
1327  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1328  AV_PIX_FMT_NONE},
1329 };