/* NOTE(review): fragmentary excerpt — the leading integer on each line below
 * looks like a line number fused in by the extraction tool, not a source token.
 * These appear to be the DNxHD encoder's configuration macros. */
27 #define RC_VARIANCE 1 // use variance or ssd for fast rc
/* Shorthand option flags for the AVOption table (video + encoding param). */
36 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
37 #define DNX10BIT_QMAT_SHIFT 18 // The largest value that will not lead to overflow for 10bit samples.
/* Fixed-point fractional bits used for the rate-control lambda. */
45 #define LAMBDA_FRAC_BITS 10
/* NOTE(review): fragment of what appears to be the 8-bit symmetric 8x4
 * pixel-gather helper: four source rows are copied into the block, then
 * mirrored by copying rows backwards with memcpy. Interior lines (the
 * pointer advance between iterations) are missing from this excerpt —
 * presumably `pixels += line_size; block += 8;` — TODO confirm against
 * the full file. */
50 for (i = 0; i < 4; i++) {
51 block[0] = pixels[0]; block[1] = pixels[1];
52 block[2] = pixels[2]; block[3] = pixels[3];
53 block[4] = pixels[4]; block[5] = pixels[5];
54 block[6] = pixels[6]; block[7] = pixels[7];
/* Mirror the four rows just written: block now points past them, so
 * block - 8 is the last row written, block - 32 the first. */
58 memcpy(block, block - 8,
sizeof(*block) * 8);
59 memcpy(block + 8, block - 16,
sizeof(*block) * 8);
60 memcpy(block + 16, block - 24,
sizeof(*block) * 8);
61 memcpy(block + 24, block - 32,
sizeof(*block) * 8);
/* NOTE(review): fragment of the 10-bit variant of the symmetric pixel
 * gather — copies each source row forward into block + i*8 and mirrored
 * into block - (i+1)*8 in the same loop iteration. */
70 for (i = 0; i < 4; i++) {
71 memcpy(block + i * 8, pixels + i * line_size, 8 *
sizeof(*block));
72 memcpy(block - (i+1) * 8, pixels + i * line_size, 8 *
sizeof(*block));
/* NOTE(review): fragment of a DCT quantize routine (tail of its parameter
 * list and scattered body lines). */
77 int n,
int qscale,
int *overflow)
81 int last_non_zero = 0;
/* DC coefficient: rounded divide by 4. */
87 block[0] = (block[0] + 2) >> 2;
89 for (i = 1; i < 64; ++i) {
/* Branchless absolute value: sign is 0 or -1 via arithmetic right
 * shift. NOTE(review): `>> 31` on a negative signed int is
 * implementation-defined in ISO C; this relies on arithmetic shift,
 * a common assumption in this codebase. */
91 int sign = block[j] >> 31;
92 int level = (block[j] ^ sign) - sign;
/* Re-apply the original sign to the quantized magnitude. */
94 block[j] = (level ^ sign) - sign;
/* NOTE(review): fragment of VLC/run-level table initialization — builds a
 * per-(level,run) lookup indexed by (level<<1)|run. Most of the loop body
 * is missing from this excerpt. */
114 for (level = -max_level; level < max_level; level++) {
115 for (run = 0; run < 2; run++) {
116 int index = (level<<1)|run;
117 int sign, offset = 0, alevel =
level;
/* Presumably selects an escape/offset bucket for large magnitudes
 * (alevel > 64) — TODO confirm against the full file. */
121 offset = (alevel-1)>>6;
/* Scan the 257-entry code table for a matching entry. */
124 for (j = 0; j < 257; j++) {
/* A nonzero level must have found a code before the table end. */
139 assert(!alevel || j < 257);
/* Separate loop, apparently over the 62-entry run table. */
146 for (i = 0; i < 62; i++) {
146 for (i = 0; i < 62; i++) {
160 uint16_t weight_matrix[64] = {1,};
171 for (i = 1; i < 64; i++) {
177 for (i = 1; i < 64; i++) {
184 for (qscale = 1; qscale <= ctx->
m.
avctx->
qmax; qscale++) {
185 for (i = 0; i < 64; i++) {
193 for (qscale = 1; qscale <= ctx->
m.
avctx->
qmax; qscale++) {
194 for (i = 1; i < 64; i++) {
/* NOTE(review): scattered fragments from (apparently) three routines:
 * encoder init locals, bitstream header writing, and DC size computation. */
233 int i,
index, bit_depth;
/* DNxHD header prefix bytes written at the start of each coded frame. */
333 const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };
337 memcpy(buf, header_prefix, 5);
/* `msip` pointer parked at fixed offset 0x170 in the header —
 * presumably the macroblock scan index table; TODO confirm. */
357 ctx->
msip = buf + 0x170;
/* Size in bits of a DC difference: log2 of the magnitude doubled,
 * with separate negative/positive paths. */
365 nbits = av_log2_16bit(-2*diff);
368 nbits = av_log2_16bit(2*diff);
/* NOTE(review): fragment of AC coefficient encoding — walks coefficients
 * up to last_index, tracking the previous nonzero position to form a
 * run/level pair. rlevel packs level and a run-present flag into one
 * index: (slevel<<1) | (run_level != 0). */
376 int last_non_zero = 0;
382 for (i = 1; i <= last_index; i++) {
386 int run_level = i - last_non_zero - 1;
387 int rlevel = (slevel<<1)|!!run_level;
/* NOTE(review): fragment of coefficient unquantization. Negative and
 * positive levels are reconstructed on separate paths; the weight values
 * 8 and 32 look like the identity weights for (presumably) the 8-bit and
 * 10-bit matrices respectively, in which case the compare skips a
 * rounding/normalization step — TODO confirm against the full file. */
399 const uint8_t *weight_matrix;
405 for (i = 1; i <= last_index; i++) {
/* Negative branch: (1 - 2*level) reconstructs the scaled magnitude
 * for level < 0. */
410 level = (1-2*
level) * qscale * weight_matrix[i];
412 if (weight_matrix[i] != 8)
416 if (weight_matrix[i] != 32)
/* Positive branch: mirror of the above. */
422 level = (2*level+1) * qscale * weight_matrix[i];
424 if (weight_matrix[i] != 8)
428 if (weight_matrix[i] != 32)
/* NOTE(review): two small fragments. First: sum-of-squared-differences
 * between the original and the quantized/reconstructed block (used for
 * RD scoring). */
442 for (i = 0; i < 64; i++)
443 score += (block[i] - qblock[i]) * (block[i] - qblock[i]);
/* Second: run/level walk counting AC bits, same run-tracking shape as
 * the encode loop above. */
449 int last_non_zero = 0;
452 for (i = 1; i <= last_index; i++) {
456 int run_level = i - last_non_zero - 1;
/* NOTE(review): fragment computing the source-plane pointers for one
 * macroblock. Luma advances by (mb_y << 4) rows and (mb_x << bs+1)
 * columns; chroma uses uvlinesize and half the horizontal step. The
 * `ctx-> thread[0]-> src[...]` chains are single expressions split
 * across lines by the extraction. NOTE(review): `mb_x << bs+1` parses
 * as `mb_x << (bs+1)` — shift binds looser than `+`; apparently
 * intentional here (bw = 1 << bs suggests bs is the block-size log2). */
467 const int bw = 1 << bs;
468 const uint8_t *ptr_y = ctx->
thread[0]->
src[0] + ((mb_y << 4) * ctx->
m.
linesize) + (mb_x << bs+1);
469 const uint8_t *ptr_u = ctx->
thread[0]->
src[1] + ((mb_y << 4) * ctx->
m.
uvlinesize) + (mb_x << bs);
470 const uint8_t *ptr_v = ctx->
thread[0]->
src[2] + ((mb_y << 4) * ctx->
m.
uvlinesize) + (mb_x << bs);
/* NOTE(review): fragment of a per-slice worker — jobnr selects the
 * macroblock row, threadnr selects the per-thread context copy. Walks
 * each macroblock in the row, then each of its 8 blocks, counting bits. */
514 int mb_y = jobnr, mb_x;
517 ctx = ctx->
thread[threadnr];
523 for (mb_x = 0; mb_x < ctx->
m.
mb_width; mb_x++) {
/* Linear macroblock index for per-MB rate-control tables. */
524 unsigned mb = mb_y * ctx->
m.
mb_width + mb_x;
532 for (i = 0; i < 8; i++) {
534 int overflow, nbits, diff, last_index;
/* DC bit count, same log2-of-doubled-magnitude idiom as above. */
542 if (diff < 0) nbits = av_log2_16bit(-2*diff);
543 else nbits = av_log2_16bit( 2*diff);
/* DC size must fit the table sized by bit depth + 4. */
545 assert(nbits < ctx->cid_table->bit_depth + 4);
/* NOTE(review): fragment of the actual per-slice encode worker — same
 * row/thread-context selection and macroblock walk as the bit-counting
 * worker above, but (presumably) writing bits instead of counting. */
565 int mb_y = jobnr, mb_x;
566 ctx = ctx->
thread[threadnr];
572 for (mb_x = 0; mb_x < ctx->
m.
mb_width; mb_x++) {
573 unsigned mb = mb_y * ctx->
m.
mb_width + mb_x;
581 for (i = 0; i < 8; i++) {
/* NOTE(review): fragment computing per-row slice byte offsets — walks all
 * macroblocks accumulating sizes, advancing `offset` by each row's
 * thread_size so each slice worker knows where to write. */
601 for (mb_y = 0; mb_y < ctx->
m.
mb_height; mb_y++) {
605 for (mb_x = 0; mb_x < ctx->
m.
mb_width; mb_x++) {
606 unsigned mb = mb_y * ctx->
m.
mb_width + mb_x;
612 offset += thread_size;
/* NOTE(review): fragment of the macroblock variance worker used by the
 * fast rate control (see RC_VARIANCE macro above). Two paths are visible:
 * an 8-bit walk advancing pix by 16 per macroblock, and a 10-bit path
 * that recomputes a uint16_t pixel pointer per macroblock and accumulates
 * sum and sum-of-squares over the 16x16 block. */
619 int mb_y = jobnr, mb_x;
620 ctx = ctx->
thread[threadnr];
623 for (mb_x = 0; mb_x < ctx->
m.
mb_width; ++mb_x, pix += 16) {
624 unsigned mb = mb_y * ctx->
m.
mb_width + mb_x;
/* 10-bit path: linesize is in bytes, halved for uint16_t samples. */
631 int const linesize = ctx->
m.
linesize >> 1;
632 for (mb_x = 0; mb_x < ctx->
m.
mb_width; ++mb_x) {
633 uint16_t *pix = (uint16_t*)ctx->
thread[0]->
src[0] + ((mb_y << 4) * linesize) + (mb_x << 4);
634 unsigned mb = mb_y * ctx->
m.
mb_width + mb_x;
640 for (i = 0; i < 16; ++i) {
641 for (j = 0; j < 16; ++j) {
/* Drop the 6 padding LSBs of 10-in-16-bit samples — presumably
 * samples are stored left-justified; TODO confirm. */
643 int const sample = (unsigned)pix[j] >> 6;
645 sqsum += sample * sample;
/* NOTE(review): fragment of the RDO rate-control loop — a bisection /
 * expanding-step search on lambda so that the frame fits frame_bits.
 * last_lower/last_higher bracket the solution; steps grow 5x (clamped to
 * INT_MAX via int64_t) while unbracketed, and lambda bisects once both
 * bounds exist. */
661 int lambda, up_step, down_step;
662 int last_lower = INT_MAX, last_higher = 0;
/* Per-qscale pass up to qmax. */
665 for (q = 1; q < avctx->
qmax; q++) {
675 if (lambda == last_higher) {
/* Pick the best qscale for this MB by minimizing bits*lambda + (ssd
 * term, not visible in this excerpt). */
681 unsigned min = UINT_MAX;
684 for (q = 1; q < avctx->
qmax; q++) {
685 unsigned score = ctx->
mb_rc[q][mb].
bits*lambda+
/* Round the running bit total up to a 32-bit boundary. */
696 bits = (bits+31)&~31;
707 if (bits < ctx->frame_bits) {
/* Frame fits: lambda is an upper bound; bisect or step down. */
708 last_lower =
FFMIN(lambda, last_lower);
709 if (last_higher != 0)
710 lambda = (lambda+last_higher)>>1;
713 down_step =
FFMIN((int64_t)down_step*5, INT_MAX);
715 lambda =
FFMAX(1, lambda);
716 if (lambda == last_lower)
/* Frame too big: lambda is a lower bound; bisect or step up,
 * guarding lambda + up_step against int overflow. */
719 last_higher =
FFMAX(lambda, last_higher);
720 if (last_lower != INT_MAX)
721 lambda = (lambda+last_lower)>>1;
722 else if ((int64_t)lambda + up_step > INT_MAX)
726 up_step =
FFMIN((int64_t)up_step*5, INT_MAX);
/* NOTE(review): fragment of the simpler (non-RDO) qscale search — same
 * bracketing-bisection shape as the lambda search above, but directly on
 * the integer qscale. Terminates when the bracket closes to adjacent
 * values (last_higher == qscale - 1 / last_lower == qscale + 1). */
741 int last_lower = INT_MAX;
754 bits = (bits+31)&~31;
760 if (bits < ctx->frame_bits) {
763 if (last_higher == qscale - 1) {
764 qscale = last_higher;
767 last_lower =
FFMIN(qscale, last_lower);
768 if (last_higher != 0)
769 qscale = (qscale+last_higher)>>1;
771 qscale -= down_step++;
776 if (last_lower == qscale + 1)
778 last_higher =
FFMAX(qscale, last_higher);
779 if (last_lower != INT_MAX)
780 qscale = (qscale+last_lower)>>1;
/* NOTE(review): fragment of a radix sort (apparently used to order
 * macroblocks by RD score): 4 passes of 8 bits each over 32-bit keys.
 * Counting phase zeroes all buckets and histograms the input; the
 * descending prefix-subtraction turns counts into start offsets; the
 * assert checks the first bucket's offset ends at 0. */
793 #define BUCKET_BITS 8
794 #define RADIX_PASSES 4
795 #define NBUCKETS (1 << BUCKET_BITS)
807 memset(buckets, 0,
sizeof(buckets[0][0]) *
RADIX_PASSES * NBUCKETS);
808 for (i = 0; i <
size; i++) {
818 for (i = NBUCKETS - 1; i >= 0; i--)
819 buckets[j][i] = offset -= buckets[j][i];
820 assert(!buckets[j][0]);
/* Scatter phase: each element is placed at its bucket's next slot. */
828 for (i = 0; i <
size; i++) {
830 int pos = buckets[
v]++;
/* NOTE(review): scattered fragments from the top-level frame encode
 * (3-plane loops, output-buffer size check, rate-control failure path
 * with a retry via goto) and, at the very end, the codec registration
 * struct naming its AVClass. */
890 for (i = 0; i < 3; i++) {
/* Output buffer must hold one full coded frame for this CID. */
912 if (buf_size < ctx->cid_table->frame_size) {
920 for (i = 0; i < 3; i++) {
934 "picture could not fit ratecontrol constraints, increase qmax\n");
/* Retry the coding unit (presumably after raising qscale). */
959 goto encode_coding_unit;
1007 .priv_class = &
class,