168 for (i = 0; i < 4; i++)
171 for (i = 0; i < 4; i++)
175 for (i = 0; i < 3; i++)
184 for (i = 0; i < 4; i++) {
217 if (buf_size - size < 0)
241 for (i = 0; i < 4; i++) {
305 int header_size, hscale, vscale, i, j, k, l, m, ret;
312 header_size = AV_RL24(buf) >> 5;
324 if (header_size > buf_size - 7*s->keyframe) {
330 if (AV_RL24(buf) != 0x2a019d) {
334 width = AV_RL16(buf+3) & 0x3fff;
335 height = AV_RL16(buf+5) & 0x3fff;
336 hscale = buf[4] >> 6;
337 vscale = buf[6] >> 6;
341 if (hscale || vscale)
345 for (i = 0; i < 4; i++)
346 for (j = 0; j < 16; j++)
358 buf_size -= header_size;
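The checks above follow the VP8 uncompressed data chunk layout: the first three bytes form the frame tag (bit 0 inter-frame flag, bits 1-3 profile, bit 4 show_frame, bits 5-23 size of the first partition), and keyframes append a 7-byte block holding the 0x9d012a start code, two 14-bit dimensions and two 2-bit scaling fields. A minimal standalone sketch of that parse, assuming the same little-endian byte order; parse_vp8_keyframe_header() and the local rl16()/rl24() readers are illustrative, not part of vp8.c:

#include <stdint.h>

static unsigned rl16(const uint8_t *p) { return p[0] | (p[1] << 8); }
static unsigned rl24(const uint8_t *p) { return rl16(p) | (p[2] << 16); }

/* Illustrative restatement of the keyframe header fields read above. */
static int parse_vp8_keyframe_header(const uint8_t *buf, int buf_size,
                                     int *width, int *height)
{
    if (buf_size < 10)
        return -1;
    unsigned tag        = rl24(buf);
    int      keyframe   = !(tag & 1);           /* bit 0: 0 means keyframe        */
    unsigned first_part = tag >> 5;             /* bits 5-23: first partition size */
    if (!keyframe || rl24(buf + 3) != 0x2a019d) /* start code 0x9d 0x01 0x2a       */
        return -1;
    *width  = rl16(buf + 6) & 0x3fff;           /* 14-bit width,  hscale in top 2 bits of buf[7] */
    *height = rl16(buf + 8) & 0x3fff;           /* 14-bit height, vscale in top 2 bits of buf[9] */
    (void)first_part;
    return 0;
}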
405 for (i = 0; i < 4; i++)
406 for (j = 0; j < 8; j++)
407 for (k = 0; k < 3; k++)
424 for (i = 0; i < 4; i++)
427 for (i = 0; i < 3; i++)
431 for (i = 0; i < 2; i++)
432 for (j = 0; j < 19; j++)
456 for (i = 0; i < 3; i++)
458 for (i = 9; i > 3; i--)
500 *mbsplits_cur, *firstidx;
510 top_mv = top_mb->bmv;
527 for (n = 0; n < num; n++) {
529 uint32_t left, above;
533 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
535 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
537 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
539 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
568 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
569 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
591 #define MV_EDGE_CHECK(n)\
593 VP8Macroblock *edge = mb_edge[n];\
594 int edge_ref = edge->ref_frame;\
595 if (edge_ref != VP56_FRAME_CURRENT) {\
596 uint32_t mv = AV_RN32A(&edge->mv);\
598 if (cur_sign_bias != sign_bias[edge_ref]) {\
601 mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
603 if (!n || mv != AV_RN32A(&near_mv[idx]))\
604 AV_WN32A(&near_mv[++idx], mv);\
605 cnt[idx] += 1 + (n != 2);\
607 cnt[CNT_ZERO] += 1 + (n != 2);\
620 if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
621 cnt[CNT_NEAREST] += 1;
624 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
626 FFSWAP(VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
633 clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
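When a neighbouring macroblock was coded with the opposite reference-frame sign bias, its motion vector is negated before being counted; the masked add/xor at line 601 (together with the bitwise NOT on the preceding source line, which this excerpt omits) negates both packed 16-bit components in one 32-bit operation. A self-contained illustration of that SWAR trick; swar_negate_mv() is an illustrative name:

#include <stdint.h>

/* Negate two int16 components packed in one uint32_t without unpacking:
 * complement, then add 1 to each half while masking bit 15 so the carry
 * cannot spill from the low half into the high half. */
static uint32_t swar_negate_mv(uint32_t mv)
{
    mv = ~mv;
    return ((mv & 0x7fff7fff) + 0x00010001) ^ (mv & 0x80008000);
}
/* Packing x = 3, y = -7 gives 0xfff90003; the result reads back as
 * x = -3, y = 7. */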
663 int mb_x, int keyframe, int layout)
679 for (y = 0; y < 4; y++) {
680 for (x = 0; x < 4; x++) {
684 left[y] = top[x] = *intra4x4;
690 for (i = 0; i < 16; i++)
704 *segment = ref ? *ref : *segment;
750 #ifndef decode_block_coeffs_internal
762 int i, uint8_t *token_prob, int16_t qmul[2])
775 token_prob = probs[i][0];
781 token_prob = probs[i+1][1];
801 int cat = (a<<1) + b;
802 coeff = 3 + (8<<cat);
806 token_prob = probs[i+1][2];
830 int i, int zero_nhood, int16_t qmul[2])
832 uint8_t *token_prob = probs[i][zero_nhood];
842 int i, x, y, luma_start = 0, luma_ctx = 3;
843 int nnz_pred, nnz, nnz_total = 0;
848 nnz_pred = t_nnz[8] + l_nnz[8];
853 l_nnz[8] = t_nnz[8] = !!nnz;
867 for (y = 0; y < 4; y++)
868 for (x = 0; x < 4; x++) {
869 nnz_pred = l_nnz[y] + t_nnz[x];
874 t_nnz[x] = l_nnz[y] = !!nnz;
881 for (i = 4; i < 6; i++)
882 for (y = 0; y < 2; y++)
883 for (x = 0; x < 2; x++) {
884 nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
888 t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
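Each block's token probabilities are selected by how many of its left and top neighbours ended with non-zero coefficients (0, 1 or 2), and after decoding, only a 0/1 "had coefficients" flag is written back for the next block, as the loops above show. A reduced sketch of that bookkeeping; decode_luma_nnz() is an illustrative helper and the block decoder is passed in as a callback rather than calling decode_block_coeffs() directly:

#include <stdint.h>

static void decode_luma_nnz(uint8_t l_nnz[4], uint8_t t_nnz[4],
                            int (*decode_block)(int ctx))
{
    for (int y = 0; y < 4; y++)
        for (int x = 0; x < 4; x++) {
            int ctx = l_nnz[y] + t_nnz[x];   /* 0, 1 or 2 non-zero neighbours */
            int nnz = decode_block(ctx);     /* returns the coefficient count */
            t_nnz[x] = l_nnz[y] = !!nnz;     /* only the flag is carried over */
        }
}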
901 int linesize, int uvlinesize, int simple)
905 AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
906 AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
912 int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
913 int simple, int xchg)
915 uint8_t *top_border_m1 = top_border-32;
917 src_cb -= uvlinesize;
918 src_cr -= uvlinesize;
920 #define XCHG(a,b,xchg) do { \
921 if (xchg) AV_SWAP64(b,a); \
922 else AV_COPY64(b,a); \
925 XCHG(top_border_m1+8, src_y-8, xchg);
926 XCHG(top_border, src_y, xchg);
927 XCHG(top_border+8, src_y+8, 1);
928 if (mb_x < mb_width-1)
929 XCHG(top_border+32, src_y+16, 1);
933 if (!simple || !mb_y) {
934 XCHG(top_border_m1+16, src_cb-8, xchg);
935 XCHG(top_border_m1+24, src_cr-8, xchg);
936 XCHG(top_border+16, src_cb, 1);
937 XCHG(top_border+24, src_cr, 1);
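The XCHG macro lets one code path either store the row above the macroblock or temporarily exchange it, depending on whether intra prediction needs the unfiltered border pixels put back afterwards. The same swap-or-copy idea in portable form (xchg8() is an illustrative name; the real code uses the 8-byte AV_SWAP64/AV_COPY64 macros):

#include <stdint.h>
#include <string.h>

static void xchg8(uint8_t *a, uint8_t *b, int xchg)
{
    if (xchg) {                 /* exchange the two 8-byte groups */
        uint64_t tmp;
        memcpy(&tmp, b, 8);
        memcpy(b, a, 8);
        memcpy(a, &tmp, 8);
    } else {                    /* one-way copy, as XCHG(a, b, 0) does */
        memcpy(b, a, 8);
    }
}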
1025 int x, y, mode, nnz;
1041 uint8_t tr_top[4] = { 127, 127, 127, 127 };
1051 tr = tr_right[-1]*0x01010101u;
1058 for (y = 0; y < 4; y++) {
1060 for (x = 0; x < 4; x++) {
1065 if ((y == 0 || x == 3) && mb_y == 0) {
1068 topright = tr_right;
1072 dst = copy_dst + 12;
1076 AV_WN32A(copy_dst+4, 127U * 0x01010101U);
1082 copy_dst[3] = ptr[4*x-s->linesize-1];
1089 copy_dst[35] = 129U;
1091 copy_dst[11] = ptr[4*x -1];
1092 copy_dst[19] = ptr[4*x+s->linesize -1];
1093 copy_dst[27] = ptr[4*x+s->linesize*2-1];
1094 copy_dst[35] = ptr[4*x+s->linesize*3-1];
1097 s->hpc.pred4x4[mode](dst, topright, linesize);
1131 { 0, 1, 2, 1, 2, 1, 2, 1 },
1133 { 0, 3, 5, 3, 5, 3, 5, 3 },
1134 { 0, 2, 3, 2, 3, 2, 3, 2 },
1156 int x_off, int y_off, int block_w, int block_h,
1163 int src_linesize = linesize;
1165 int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
1166 int my = (mv->y << 1)&7, my_idx = subpel_idx[0][my];
1168 x_off += mv->x >> 2;
1169 y_off += mv->y >> 2;
1173 src += y_off * linesize + x_off;
1174 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1175 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1177 src - my_idx * linesize - mx_idx,
1180 x_off - mx_idx, y_off - my_idx, width, height);
1184 mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1187 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
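The quarter-pel MV is split into an integer offset (mv >> 2) and a 3-bit subpel phase ((mv << 1) & 7); the block is then motion compensated directly from the reference frame unless the block, widened by the subpel filter's margins from subpel_idx, would read outside the picture, in which case emulated_edge_mc first replicates the border into a scratch buffer. The bounds test in isolation; needs_edge_emulation() is an illustrative name:

/* mx_idx/my_idx are the left/top filter margins (subpel_idx[0][..]) and
 * mx_ext/my_ext the extra right/bottom extent (subpel_idx[2][..]). */
static int needs_edge_emulation(int x_off, int y_off, int block_w, int block_h,
                                int width, int height,
                                int mx_idx, int my_idx, int mx_ext, int my_ext)
{
    return x_off < mx_idx || x_off >= width  - block_w - mx_ext ||
           y_off < my_idx || y_off >= height - block_h - my_ext;
}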
1211 int block_w, int block_h, int width, int height, ptrdiff_t linesize,
1220 x_off += mv->x >> 3;
1221 y_off += mv->y >> 3;
1224 src1 += y_off * linesize + x_off;
1225 src2 += y_off * linesize + x_off;
1227 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1228 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1230 src1 - my_idx * linesize - mx_idx,
1233 x_off - mx_idx, y_off - my_idx, width, height);
1235 mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1238 src2 - my_idx * linesize - mx_idx,
1239 EDGE_EMU_LINESIZE, linesize,
1241 x_off - mx_idx, y_off - my_idx, width, height);
1243 mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1245 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1246 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1250 mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1251 mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1258 int bx_off, int by_off,
1259 int block_w, int block_h,
1266 ref_frame, mv, x_off + bx_off, y_off + by_off,
1267 block_w, block_h, width, height, s->linesize,
1275 x_off >>= 1; y_off >>= 1;
1276 bx_off >>= 1; by_off >>= 1;
1277 width >>= 1; height >>= 1;
1278 block_w >>= 1; block_h >>= 1;
1280 dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1281 &uvmv, x_off + bx_off, y_off + by_off,
1282 block_w, block_h, width, height, s->uvlinesize,
1291 if (s->ref_count[ref-1] > (mb_xy >> 5)) {
1292 int x_off = mb_x << 4, y_off = mb_y << 4;
1293 int mx = (mb->mv.x>>2) + x_off + 8;
1294 int my = (mb->mv.y>>2) + y_off;
1301 off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
1313 int x_off = mb_x << 4, y_off = mb_y << 4;
1321 0, 0, 16, 16, width, height, &mb->mv);
1328 for (y = 0; y < 4; y++) {
1329 for (x = 0; x < 4; x++) {
1332 4*x + x_off, 4*y + y_off, 4, 4,
1339 x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
1340 for (y = 0; y < 2; y++) {
1341 for (x = 0; x < 2; x++) {
1342 uvmv.x = mb->bmv[ 2*y * 4 + 2*x ].x +
1343 mb->bmv[ 2*y * 4 + 2*x+1].x +
1344 mb->bmv[(2*y+1) * 4 + 2*x ].x +
1345 mb->bmv[(2*y+1) * 4 + 2*x+1].x;
1346 uvmv.y = mb->bmv[ 2*y * 4 + 2*x ].y +
1347 mb->bmv[ 2*y * 4 + 2*x+1].y +
1348 mb->bmv[(2*y+1) * 4 + 2*x ].y +
1349 mb->bmv[(2*y+1) * 4 + 2*x+1].y;
1350 uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
1351 uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
1357 dst[2] + 4*y*s->uvlinesize + x*4, ref, &uvmv,
1358 4*x + x_off, 4*y + y_off, 4, 4,
1367 0, 0, 16, 8, width, height, &bmv[0]);
1369 0, 8, 16, 8, width, height, &bmv[1]);
1373 0, 0, 8, 16, width, height, &bmv[0]);
1375 8, 0, 8, 16, width, height, &bmv[1]);
1379 0, 0, 8, 8, width, height, &bmv[0]);
1381 8, 0, 8, 8, width, height, &bmv[1]);
1383 0, 8, 8, 8, width, height, &bmv[2]);
1385 8, 8, 8, 8, width, height, &bmv[3]);
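For 4x4 splits the chroma vector is the average of the four covering luma vectors; the (uvmv + 2 + (uvmv >> (INT_BIT-1))) >> 2 expression above divides the sum by 4 and rounds halves away from zero without a branch, because the sign term subtracts one before the arithmetic shift for negative sums. A small check of that identity (round_div4() is an illustrative name; an arithmetic right shift of negative values is assumed, as FFmpeg does):

#include <assert.h>
#include <limits.h>

static int round_div4(int v)
{
    return (v + 2 + (v >> (sizeof(int) * CHAR_BIT - 1))) >> 2;
}

int main(void)
{
    assert(round_div4( 6) ==  2 && round_div4(-6) == -2);  /* +-1.5  -> +-2 */
    assert(round_div4( 5) ==  1 && round_div4(-5) == -1);  /* +-1.25 -> +-1 */
    return 0;
}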
1397 for (y = 0; y < 4; y++) {
1400 if (nnz4&~0x01010101) {
1401 for (x = 0; x < 4; x++) {
1418 for (ch = 0; ch < 2; ch++) {
1422 if (nnz4&~0x01010101) {
1423 for (y = 0; y < 2; y++) {
1424 for (x = 0; x < 2; x++) {
1431 goto chroma_idct_end;
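idct_mb loads four per-block coefficient counts at once as one 32-bit word; nnz4 & ~0x01010101 is non-zero exactly when some block in the group has more than a DC coefficient, which selects the full IDCT-add over the cheaper DC-only add. The packed test in isolation (has_ac_coeffs() is an illustrative name):

#include <stdint.h>

/* Each byte of nnz4 holds one block's coefficient count; any byte
 * greater than 1 survives the mask and forces the full IDCT path. */
static int has_ac_coeffs(uint32_t nnz4)
{
    return (nnz4 & ~0x01010101u) != 0;
}
/* counts {1,0,1,1} -> 0x01010001 -> 0 (DC-only adds);
 * counts {1,3,0,0} -> 0x00000301 -> non-zero (full IDCT). */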
1445 int interior_limit, filter_level;
1459 filter_level = av_clip_uintp2(filter_level, 6);
1461 interior_limit = filter_level;
1466 interior_limit = FFMAX(interior_limit, 1);
1475 int mbedge_lim, bedge_lim, hev_thresh;
1481 static const uint8_t hev_thresh_lut[2][64] = {
1482 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1483 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1484 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
1486 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1487 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1488 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1495 bedge_lim = 2*filter_level + inner_limit;
1496 mbedge_lim = bedge_lim + 4;
1498 hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
1502 mbedge_lim, inner_limit, hev_thresh);
1504 mbedge_lim, inner_limit, hev_thresh);
1509 inner_limit, hev_thresh);
1511 inner_limit, hev_thresh);
1513 inner_limit, hev_thresh);
1515 uvlinesize, bedge_lim,
1516 inner_limit, hev_thresh);
1521 mbedge_lim, inner_limit, hev_thresh);
1523 mbedge_lim, inner_limit, hev_thresh);
1528 linesize, bedge_lim,
1529 inner_limit, hev_thresh);
1531 linesize, bedge_lim,
1532 inner_limit, hev_thresh);
1534 linesize, bedge_lim,
1535 inner_limit, hev_thresh);
1537 dst[2] + 4 * uvlinesize,
1538 uvlinesize, bedge_lim,
1539 inner_limit, hev_thresh);
1545 int mbedge_lim, bedge_lim;
1554 bedge_lim = 2*filter_level + inner_limit;
1555 mbedge_lim = bedge_lim + 4;
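Both filter paths derive their thresholds the same way: the per-macroblock level (clamped to 6 bits in filter_level_for_mb) gives an inner-edge limit of 2*filter_level + inner_limit, the macroblock-edge limit adds 4, and the full filter additionally takes its high-edge-variance threshold from the keyframe-indexed table above. Restated compactly (the struct and function names are illustrative; only the arithmetic mirrors the code):

struct vp8_filter_limits { int mbedge_lim, bedge_lim; };

static struct vp8_filter_limits make_limits(int filter_level, int inner_limit)
{
    struct vp8_filter_limits l;
    l.bedge_lim  = 2 * filter_level + inner_limit;  /* inner 4x4 edges  */
    l.mbedge_lim = l.bedge_lim + 4;                 /* macroblock edges */
    return l;
}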
1574 #define MARGIN (16 << 2)
1583 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1591 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
1595 prev_frame && prev_frame->seg_map ?
1606 #define check_thread_pos(td, otd, mb_x_check, mb_y_check)\
1608 int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF);\
1609 if (otd->thread_mb_pos < tmp) {\
1610 pthread_mutex_lock(&otd->lock);\
1611 td->wait_mb_pos = tmp;\
1613 if (otd->thread_mb_pos >= tmp)\
1615 pthread_cond_wait(&otd->cond, &otd->lock);\
1617 td->wait_mb_pos = INT_MAX;\
1618 pthread_mutex_unlock(&otd->lock);\
1622 #define update_pos(td, mb_y, mb_x)\
1624 int pos = (mb_y << 16) | (mb_x & 0xFFFF);\
1625 int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && (num_jobs > 1);\
1626 int is_null = (next_td == NULL) || (prev_td == NULL);\
1627 int pos_check = (is_null) ? 1 :\
1628 (next_td != td && pos >= next_td->wait_mb_pos) ||\
1629 (prev_td != td && pos >= prev_td->wait_mb_pos);\
1630 td->thread_mb_pos = pos;\
1631 if (sliced_threading && pos_check) {\
1632 pthread_mutex_lock(&td->lock);\
1633 pthread_cond_broadcast(&td->cond);\
1634 pthread_mutex_unlock(&td->lock);\
1638 #define check_thread_pos(td, otd, mb_x_check, mb_y_check)
1639 #define update_pos(td, mb_y, mb_x)
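Together the two macros form a producer/consumer handshake for sliced threading: each row decoder publishes its progress as a single int packing (mb_y << 16) | mb_x, and a dependent row blocks on the other thread's condition variable until that position passes the macroblock it needs. A minimal standalone sketch of the pattern with POSIX threads; the struct and helper names are illustrative, and the real macros skip locking when the producer is already far enough ahead:

#include <pthread.h>

struct row_progress {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             pos;               /* (mb_y << 16) | mb_x published so far */
};

static void wait_for_pos(struct row_progress *other, int mb_x, int mb_y)
{
    int need = (mb_y << 16) | (mb_x & 0xFFFF);
    pthread_mutex_lock(&other->lock);
    while (other->pos < need)
        pthread_cond_wait(&other->cond, &other->lock);
    pthread_mutex_unlock(&other->lock);
}

static void publish_pos(struct row_progress *self, int mb_x, int mb_y)
{
    pthread_mutex_lock(&self->lock);
    self->pos = (mb_y << 16) | (mb_x & 0xFFFF);
    pthread_cond_broadcast(&self->cond);
    pthread_mutex_unlock(&self->lock);
}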
1643 int jobnr, int threadnr)
1648 int mb_x, mb_xy = mb_y*s->mb_width;
1658 if (mb_y == 0) prev_td = td;
1659 else prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
1660 if (mb_y == s->mb_height-1) next_td = td;
1661 else next_td = &s->thread_data[(jobnr + 1)%num_jobs];
1671 memset(mb - 1, 0, sizeof(*mb));
1680 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
1682 if (prev_td != td) {
1683 if (threadnr != 0) {
1695 prev_frame && prev_frame->seg_map ?
1696 prev_frame->seg_map->data + mb_xy : NULL, 0);
1726 if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs-1) {
1750 int jobnr, int threadnr)
1769 if (mb_y == 0) prev_td = td;
1770 else prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
1771 if (mb_y == s->mb_height-1) next_td = td;
1772 else next_td = &s->thread_data[(jobnr + 1)%num_jobs];
1774 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
1776 if (prev_td != td) {
1784 if (num_jobs == 1) {
1804 int jobnr, int threadnr)
1812 for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
1834 int ret, i, referenced, num_jobs;
1857 for (i = 0; i < 5; i++)
1859 &s->frames[i] != prev_frame &&
1866 for (i = 0; i < 5; i++)
1867 if (&s->frames[i] != prev_frame &&
1878 if (curframe->tf.f->data[0])
1892 curframe->tf.f->key_frame = s->keyframe;
1919 s->linesize = curframe->tf.f->linesize[0];
2036 #define REBASE(pic) \
2037 pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
2051 s->prob[0] = s_src->prob[!s_src->update_probabilities];
2057 if (s_src->frames[i].tf.f->data[0]) {
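REBASE carries frame pointers across decoder contexts during frame threading by turning a pointer into the source context's frames[] array into the pointer to the same slot in the destination context. The idiom in isolation (rebase_frame() and the struct are illustrative; vp8.c applies it to VP8Frame pointers):

#include <stddef.h>

struct frame_ctx { int frames[5]; };   /* stand-in for the 5-entry frame pool */

static int *rebase_frame(int *pic, struct frame_ctx *src, struct frame_ctx *dst)
{
    /* same index, other context's array */
    return pic ? pic - &src->frames[0] + &dst->frames[0] : NULL;
}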
static void get_quants(VP8Context *s)
VP8Macroblock * macroblocks
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
static const uint8_t vp8_submv_prob[5][3]
static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
discard all frames except keyframes
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
(only used in prediction) no split MVs
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
static void update_lf_deltas(VP8Context *s)
This structure describes decoded (raw) audio or video data.
int8_t sign_bias[4]
one state [0, 1] per ref frame type
int coded_width
Bitstream width / height, may be different from width/height e.g.
#define AV_LOG_WARNING
Something somehow does not look correct.
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
uint8_t * intra4x4_pred_mode_top
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf)
static void vp8_decode_flush(AVCodecContext *avctx)
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
#define DECLARE_ALIGNED(n, t, v)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define FF_ARRAY_ELEMS(a)
static const int8_t vp8_pred8x8c_tree[3][2]
struct VP8Context::@65 segmentation
Base parameters for segmentation, i.e.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
static const uint8_t zigzag_scan[16]
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: width>>3, height is assumed equal to width second dimension: 0 if no vertical interp...
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
static const uint8_t vp8_pred8x8c_prob_inter[3]
static const uint8_t vp8_mbsplits[5][16]
enum AVDiscard skip_frame
static const int8_t vp8_pred16x16_tree_intra[4][2]
static av_always_inline void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
void avpriv_request_sample(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
uint8_t intra4x4_pred_mode_top[4]
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
Multithreading support functions.
av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
void(* vp8_h_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static const uint8_t vp8_mv_update_prob[2][19]
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
int update_last
update VP56_FRAME_PREVIOUS with the current one
static int update_dimensions(VP8Context *s, int width, int height)
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
static void parse_segment_info(VP8Context *s)
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
vp8_mc_func put_pixels_tab[3][3][3]
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
uint8_t intra4x4_pred_mode_mb[16]
uint8_t intra4x4_pred_mode_left[4]
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
static const uint8_t vp8_mbsplit_count[4]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const int8_t vp8_coeff_band_indexes[8][10]
static const uint8_t vp8_pred4x4_mode[]
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
static const uint8_t vp8_dct_cat2_prob[]
static const uint8_t vp8_mv_default_prob[2][19]
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
static const int sizes[][2]
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int pthread_mutex_init(pthread_mutex_t *m, void *attr)
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int active_thread_type
Which multithreading methods are in use by the codec.
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static const uint8_t vp8_mbfirstidx[4][16]
#define EDGE_EMU_LINESIZE
void av_log(void *avcl, int level, const char *fmt,...)
const char * name
Name of the codec implementation.
VP8Macroblock * macroblocks_base
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
static const uint8_t vp8_pred4x4_prob_inter[9]
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9])
struct VP8Context::@66 filter
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
static const uint8_t vp8_pred16x16_prob_inter[4]
useful rectangle filling function
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int pthread_mutex_destroy(pthread_mutex_t *m)
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Motion vector coding, 17.1.
#define FF_THREAD_FRAME
Decode more than one frame at once.
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
int width
picture width / height.
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
static av_cold int vp8_init_frames(VP8Context *s)
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
static av_unused int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
int16_t luma_dc_qmul[2]
luma dc-only block quant
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
uint8_t(* top_border)[16+8+8]
#define vp56_rac_get_prob
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
static const int8_t mv[256][2]
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
static void pthread_cond_destroy(pthread_cond_t *cond)
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
uint8_t * data
The data buffer.
VP8Frame * next_framep[4]
int mb_layout
This describes the macroblock memory layout.
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
static const uint8_t vp8_mbsplit_prob[3]
VP56RangeCoder c
header context, includes mb modes and motion vectors
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
VP56RangeCoder coeff_partition[8]
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
static const int8_t vp8_pred16x16_tree_inter[4][2]
struct VP8Context::@68 lf_delta
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
VP8FilterStrength * filter_strength
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
static const int8_t vp8_pred4x4_tree[9][2]
uint8_t enabled
whether each mb can have a different strength based on mode/ref
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
static const uint8_t subpel_idx[3][8]
static void update_refs(VP8Context *s)
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
static const uint8_t vp8_coeff_band[16]
struct VP8Context::@67 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
int allocate_progress
Whether to allocate progress for frame threading.
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
static const uint8_t vp8_pred16x16_prob_intra[4]
static const int8_t vp8_segmentid_tree[][2]
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
void(* vp8_h_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
void ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
2 8x16 blocks (horizontal)
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
discard all non reference
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
common internal api header.
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame)
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y)
enum AVDiscard skip_loop_filter
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
struct VP8Context::@69 prob[2]
These are all of the updatable probabilities for binary decisions.
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
#define update_pos(td, mb_y, mb_x)
struct AVCodecInternal * internal
Private context used for internal data.
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout)
Split motion vector prediction, 16.4.
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout)
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
int8_t filter_level[4]
base loop filter level
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static const int vp8_mode_contexts[6][4]
static const uint8_t vp8_dct_cat1_prob[]
#define FFSWAP(type, a, b)
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top)
static int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2])
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
This structure stores compressed data.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
const uint8_t *const ff_vp8_dct_cat_prob[]
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
VP8ThreadData * thread_data
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)