/* 2^52-scaled reciprocal of denom: integer quotient plus a correction
 * derived from the remainder. */
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;

    return ret + err / denom;
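/* Presumably the consumer of the reciprocal above: a 32x64-bit fixed-point
 * multiply.  The 64-bit mantissa is split into 32-bit halves so each partial
 * product fits in 64 bits, and the av_log2() term adds a data-dependent
 * rounding bias. */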
    uint64_t l = x * (mantissa & 0xffffffff);
    uint64_t h = x * (mantissa >> 32);

    l += 1ULL << av_log2(h >> 21);
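/* Zigzag mapping of a signed byte to an unsigned zero-run length:
 * non-negative values become even codes (2x), negative values odd codes
 * (-2x - 1). */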
    return (x << 1) ^ (x >> 7);
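/* Probabilities are read with a variable-length code whose step sizes follow
 * the Fibonacci-like table below; the accumulated bit count is then
 * validated (it must stay within 0..31) before being used. */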
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };

    for (i = 0; i < 7; i++) {

    if (bits < 0 || bits > 31) {

    } else if (bits == 0) {
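/* Reading the range coder's probability table: one value per symbol
 * (indices 1..256) with entry 257 as a sentinel.  The widened addition
 * guards the running total against 32-bit overflow. */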
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;

    rac->prob[257] = UINT_MAX;

    for (i = 1; i < 257; i++) {

        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {

        cumul_prob += rac->prob[i];
        /* A zero probability is followed by a run length, apparently
         * covering the unused symbols that come after it. */
        for (j = 0; j < prob; j++)
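/* Normalisation: if the cumulative probability is not a power of two, every
 * entry is rescaled so that it becomes one.  cumulative_target is the chosen
 * power of two, and the shortfall left by rounding is redistributed in the
 * loop further below. */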
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {

        for (i = 1; i < 257; i++) {

            scaled_cumul_prob += rac->prob[i];

        cumulative_target = 1 << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {

                   "Scaled probabilities are larger than target!\n");

        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;
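/* The remaining deficit is handed back one count at a time, cycling over the
 * first 128 symbols ((i & 0x7f) + 1) until scaled_cumul_prob reaches zero. */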
        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
    rac->scale = scale_factor;
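/* Final pass over the table, presumably accumulating the individual
 * probabilities into the cumulative form the range coder indexes. */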
    for (i = 1; i < 257; i++)
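/* Median prediction: each output byte is reconstructed as the median of the
 * left, top and left + top - topleft gradient predictors, plus the decoded
 * residual in diff[]. */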
                                      uint8_t *diff, int w, int *left,
    for (i = 0; i < w; i++) {
        l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
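/* Per-line prediction setup: the left predictor L is taken from the end of
 * the previous row, and the top-left predictor TL from the end of the row
 * two strides back. */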
        L = buf[width - stride - 1];

        TL = buf[width - (2 * stride) - 1];
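/* Line decoding: queued zeros are flushed with memset, and once esc_count
 * consecutive zero bytes have been produced the decoder appears to read an
 * escape value describing the next zero run. */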
        memset(dst + i, 0, count);

        if (l->zeros == esc_count) {
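/* Zero-run-line decoding (no range coder): mask1 and mask2 are all-ones or
 * all-zero byte masks (the -(condition) idiom), gating whether src[i + 1]
 * and src[i + 2] take part in the zero-run test below, depending on
 * esc_count. */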
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)

    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);
        if (end - dst < count) {

        memset(dst, 0, count);
        while (!zero_run && dst + i < end) {

            if (src + i >= src_end)

                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
    /* number of source bytes consumed */
    return src - src_start;
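/* Per-plane decoding.  The first byte of the plane data is an escape count
 * selecting the coding mode: low values use the range coder, values below 8
 * select zero-run or raw (memcpy) lines, 0xff fills the plane with a single
 * byte, and anything else is rejected as invalid. */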
                                  const uint8_t *src, int src_size)

    int esc_count = src[0];

    const uint8_t *src_end = src + src_size;
        if (esc_count && AV_RL32(src + 1) < length) {

        for (i = 0; i < height; i++)

                   "Output more bytes than length (%d of %d)\n", read,
    } else if (esc_count < 8) {

            for (i = 0; i < height; i++) {

                                               src_end, width, esc_count);
            if (src_size < width * height)

            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);

    } else if (esc_count == 0xff) {
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);

               "Invalid zero run escape code! (%#x)\n", esc_count);
        for (i = 0; i < height; i++) {
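/* Frame-level decoding: the packet begins with a frame-type byte and
 * little-endian plane offsets (offset_ry / offset_gu / offset_bv), which are
 * validated against the packet size before the individual planes are
 * decoded. */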
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;

    uint8_t frametype = 0;
    uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;

    uint8_t *srcs[4], *dst;
    int i, j, planes = 3;
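/* Solid-colour frames: the 32-bit value read into offset_gu apparently
 * doubles as the fill value and is written to every output pixel. */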
    for (j = 0; j < avctx->height; j++) {
        for (i = 0; i < avctx->width; i++)
            AV_WN32(dst + i * 4, offset_gu);
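/* For coded frames, every plane offset (including the optional fourth plane)
 * is range-checked against the packet size before decoding starts. */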
    for (i = 0; i < planes; i++)

    if (offset_ry >= buf_size ||
        offset_gu >= buf_size ||
        offset_bv >= buf_size ||
        (planes == 4 && offs[3] >= buf_size)) {

               "Invalid frame offsets\n");

    for (i = 0; i < planes; i++)
    for (i = 0; i < planes; i++)

        for (i = 0; i < avctx->width; i++) {
    for (i = 0; i < planes; i++)

    if (offset_ry >= buf_size ||
        offset_gu >= buf_size ||
        offset_bv >= buf_size) {

               "Invalid frame offsets\n");
                           buf_size - offset_ry);

                           buf + offset_gu, buf_size - offset_gu);

                           buf + offset_bv, buf_size - offset_bv);

               "Unsupported Lagarith frame type: %#x\n", frametype);