/*****************************************************************************
 * rdo.c: rate-distortion optimization
 *****************************************************************************
 * Copyright (C) 2005-2016 x264 project
 *
 * Authors: Loren Merritt <[email protected]>
 *          Fiona Glaser <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at [email protected].
 *****************************************************************************/

/* duplicate all the writer functions, just calculating bit cost
 * instead of writing the bitstream.
 * TODO: use these for fast 1st pass too. */

#define RDO_SKIP_BS 1

/* Transition and size tables for abs<9 MVD and residual coding */
/* Consist of i_prefix-2 1s, one zero, and a bypass sign bit */
uint8_t x264_cabac_transition_unary[15][128];
uint16_t x264_cabac_size_unary[15][128];
/* Transition and size tables for abs>9 MVD */
/* Consist of 5 1s and a bypass sign bit */
static uint8_t cabac_transition_5ones[128];
static uint16_t cabac_size_5ones[128];
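
/* Illustrative note: the unary tables are indexed by [prefix][cabac context state] and hold
 * costs in 1/256-bit units.  A caller typically does something like (see trellis_dc_shortcut()
 * and trellis_coef() below):
 *     f8_bits += x264_cabac_size_unary[prefix][state];
 *     state    = x264_cabac_transition_unary[prefix][state];
 */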

/* CAVLC: produces exactly the same bit count as a normal encode */
/* this probably still leaves some unnecessary computations */
#define bs_write1(s,v) ((s)->i_bits_encoded += 1)
#define bs_write(s,n,v) ((s)->i_bits_encoded += (n))
#define bs_write_ue(s,v) ((s)->i_bits_encoded += bs_size_ue(v))
#define bs_write_se(s,v) ((s)->i_bits_encoded += bs_size_se(v))
#define bs_write_te(s,v,l) ((s)->i_bits_encoded += bs_size_te(v,l))
#define x264_macroblock_write_cavlc static x264_macroblock_size_cavlc
#include "cavlc.c"

/* CABAC: not exactly the same. x264_cabac_size_decision() keeps track of
 * fractional bits, but only finite precision. */
#undef x264_cabac_encode_decision
#undef x264_cabac_encode_decision_noup
#undef x264_cabac_encode_bypass
#undef x264_cabac_encode_terminal
#define x264_cabac_encode_decision(c,x,v) x264_cabac_size_decision(c,x,v)
#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
#define x264_cabac_encode_terminal(c) ((c)->f8_bits_encoded += 7)
#define x264_cabac_encode_bypass(c,v) ((c)->f8_bits_encoded += 256)
#define x264_cabac_encode_ue_bypass(c,e,v) ((c)->f8_bits_encoded += (bs_size_ue_big(v+(1<<e)-1)-e)<<8)
#define x264_macroblock_write_cabac static x264_macroblock_size_cabac
#include "cabac.c"

#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
        sizeof(x264_cabac_t) - offsetof(x264_cabac_t,f8_bits_encoded) - (CHROMA444 ? 0 : (1024+12)-460) )
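/* COPY_CABAC snapshots everything from f8_bits_encoded to the end of x264_cabac_t into the
 * local cabac_tmp declared by each caller, so the size functions can update contexts freely
 * without disturbing the real encoder state; for non-4:4:4 only the first 460 context bytes
 * of state[] are in use, hence the shortened copy.  Typical use (as in x264_rd_cost_mb()):
 *     x264_cabac_t cabac_tmp;
 *     COPY_CABAC;
 *     x264_macroblock_size_cabac( h, &cabac_tmp );
 *     uint64_t f8_bits = cabac_tmp.f8_bits_encoded;   // cost in 1/256-bit units
 */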
#define COPY_CABAC_PART( pos, size )\
        memcpy( &cb->state[pos], &h->cabac.state[pos], size )

static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
{
    static const uint8_t hadamard_shift_x[4] = {4, 4, 3, 3};
    static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
    static const uint8_t hadamard_offset[4] = {0, 1, 3, 5};
    int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
                    + hadamard_offset[size];
    uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
        h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
        return res;
    }
}
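
/* Note: cached_hadamard() above and cached_satd() below store the computed value plus one,
 * so that a cache entry of 0 means "not computed yet"; cache hits return the stored value
 * minus one.  The caches themselves are presumably zeroed once per macroblock elsewhere. */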

static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
{
    static const uint8_t satd_shift_x[3] = {3, 2, 2};
    static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
    static const uint8_t satd_offset[3] = {0, 8, 16};
    ALIGNED_16( static pixel zero[16] ) = {0};
    int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
                    + satd_offset[size - PIXEL_8x4];
    int res = h->mb.pic.fenc_satd_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, zero, 0 ) >> 1;
        res = h->pixf.satd[size]( fenc, FENC_STRIDE, zero, 0 ) - dc;
        h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
        return res;
    }
}

/* Psy RD distortion metric: SSD plus "Absolute Difference of Complexities" */
/* SATD and SA8D are used to measure block complexity. */
/* The complexity difference is measured with both SATD and SA8D, to avoid bias from the DCT size: */
/* using SATD alone, for example, results in overuse of the 8x8 DCT, while the opposite occurs with SA8D alone. */

/* FIXME: Is there a better metric than averaged SATD/SA8D difference for complexity difference? */
/* Hadamard transform is recursive, so a SATD+SA8D can be done faster by taking advantage of this fact. */
/* This optimization can also be used in non-RD transform decision. */
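
/* In ssd_plane() below this works out to, roughly:
 *     psy = |complexity(fdec) - complexity(fenc)|   // SATD+SA8D AC terms for 8x8 and larger,
 *                                                   // DC-corrected SATD for smaller blocks
 *     distortion = SSD + ((psy * i_psy_rd * i_psy_rd_lambda + 128) >> 8)
 * i.e. the complexity mismatch is folded into the SSD in fixed point (8 fractional bits). */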

static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
    ALIGNED_16( static pixel zero[16] ) = {0};
    int satd = 0;
    pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
    pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
    if( p == 0 && h->mb.i_psy_rd )
    {
        /* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
        if( size <= PIXEL_8x8 )
        {
            uint64_t fdec_acs = h->pixf.hadamard_ac[size]( fdec, FDEC_STRIDE );
            uint64_t fenc_acs = cached_hadamard( h, size, x, y );
            satd = abs((int32_t)fdec_acs - (int32_t)fenc_acs)
                 + abs((int32_t)(fdec_acs>>32) - (int32_t)(fenc_acs>>32));
            satd >>= 1;
        }
        else
        {
            int dc = h->pixf.sad[size]( fdec, FDEC_STRIDE, zero, 0 ) >> 1;
            satd = abs(h->pixf.satd[size]( fdec, FDEC_STRIDE, zero, 0 ) - dc - cached_satd( h, size, x, y ));
        }
        satd = (satd * h->mb.i_psy_rd * h->mb.i_psy_rd_lambda + 128) >> 8;
    }
    return h->pixf.ssd[size](fenc, FENC_STRIDE, fdec, FDEC_STRIDE) + satd;
}

static inline int ssd_mb( x264_t *h )
{
    int chroma_size = h->luma2chroma_pixel[PIXEL_16x16];
    int chroma_ssd = ssd_plane(h, chroma_size, 1, 0, 0) + ssd_plane(h, chroma_size, 2, 0, 0);
    chroma_ssd = ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    return ssd_plane(h, PIXEL_16x16, 0, 0, 0) + chroma_ssd;
}

static int x264_rd_cost_mb( x264_t *h, int i_lambda2 )
{
    int b_transform_bak = h->mb.b_transform_8x8;
    int i_ssd;
    int i_bits;
    int type_bak = h->mb.i_type;

    x264_macroblock_encode( h );

    if( h->mb.b_deblock_rdo )
        x264_macroblock_deblock( h );

    i_ssd = ssd_mb( h );

    if( IS_SKIP( h->mb.i_type ) )
    {
        i_bits = (1 * i_lambda2 + 128) >> 8;
    }
    else if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_macroblock_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
    }
    else
    {
        x264_macroblock_size_cavlc( h );
        i_bits = ( (uint64_t)h->out.bs.i_bits_encoded * i_lambda2 + 128 ) >> 8;
    }

    h->mb.b_transform_8x8 = b_transform_bak;
    h->mb.i_type = type_bak;

    return X264_MIN( i_ssd + i_bits, COST_MAX );
}

/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
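/* Concretely, the partition/subpartition functions below return
 *     rd = (i_ssd << 8) + i_bits
 * so both the distortion and the rate term are kept at 256x the scale used by
 * x264_rd_cost_mb() (i_bits stays at bits*lambda2 rather than bits*lambda2/256);
 * the PIXEL_16x16 case is the exception, as it falls back to x264_rd_cost_mb(). */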

static uint64_t x264_rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;

    x264_macroblock_encode_p4x4( h, i4 );
    if( i_pixel == PIXEL_8x4 )
        x264_macroblock_encode_p4x4( h, i4+1 );
    if( i_pixel == PIXEL_4x8 )
        x264_macroblock_encode_p4x4( h, i4+2 );

    i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, i_pixel, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, i_pixel, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)x264_subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;
    int i8 = i4 >> 2;

    if( i_pixel == PIXEL_16x16 )
    {
        int i_cost = x264_rd_cost_mb( h, i_lambda2 );
        return i_cost;
    }

    if( i_pixel > PIXEL_8x8 )
        return x264_rd_cost_subpart( h, i_lambda2, i4, i_pixel );

    h->mb.i_cbp_luma = 0;

    x264_macroblock_encode_p8x8( h, i8 );
    if( i_pixel == PIXEL_16x8 )
        x264_macroblock_encode_p8x8( h, i8+1 );
    if( i_pixel == PIXEL_8x16 )
        x264_macroblock_encode_p8x8( h, i8+2 );

    int ssd_x = 8*(i8&1);
    int ssd_y = 8*(i8>>1);
    i_ssd = ssd_plane( h, i_pixel, 0, ssd_x, ssd_y );
    int chromapix = h->luma2chroma_pixel[i_pixel];
    int chromassd = ssd_plane( h, chromapix, 1, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT )
                  + ssd_plane( h, chromapix, 2, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT );
    i_ssd += ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_size_cabac( h, &cabac_tmp, i8, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)x264_partition_size_cavlc( h, i8, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode, pixel edge[4][32] )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;
    h->mb.i_cbp_luma &= ~(1<<i8);
    h->mb.b_transform_8x8 = 1;

    for( int p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i8x8( h, p, i8, i_qp, i_mode, edge[p], 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_8x8, 1, (i8&1)*8, (i8>>1)*8 )
                      + ssd_plane( h, PIXEL_8x8, 2, (i8&1)*8, (i8>>1)*8 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)x264_partition_i8x8_size_cavlc( h, i8, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;

    for( int p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i4x4( h, p, i4, i_qp, i_mode, 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_4x4, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, PIXEL_4x4, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)x264_partition_i4x4_size_cavlc( h, i4, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
{
    uint64_t i_ssd, i_bits;

    if( b_dct )
        x264_mb_encode_chroma( h, 0, h->mb.i_chroma_qp );

    int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
    i_ssd = ssd_plane( h, chromapix, 1, 0, 0 )
          + ssd_plane( h, chromapix, 2, 0, 0 );

    h->mb.i_chroma_pred_mode = i_mode;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_chroma_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)x264_chroma_size_cavlc( h ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

/****************************************************************************
 * Trellis RD quantization
 ****************************************************************************/

#define TRELLIS_SCORE_MAX -1LL // negative marks the node as invalid
#define TRELLIS_SCORE_BIAS (1LL<<60) // bias so that all valid scores are positive, even after negative contributions from psy
#define CABAC_SIZE_BITS 8
#define LAMBDA_BITS 4
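
/* Unit conventions for the trellis below: entropy costs (f8_bits) are measured in
 * 1/2^CABAC_SIZE_BITS = 1/256 of a bit, and trellis scores keep LAMBDA_BITS extra
 * fractional bits, so a rate term is folded into a score as
 *     score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
 * (see trellis_coef() and trellis_dc_shortcut()); the CAVLC path instead pre-shifts
 * lambda2 by LAMBDA_BITS in quant_trellis_cavlc(). */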

/* precalculate the cost of coding various combinations of bits in a single context */
void x264_rdo_init( void )
{
    for( int i_prefix = 0; i_prefix < 15; i_prefix++ )
    {
        for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
        {
            int f8_bits = 0;
            uint8_t ctx = i_ctx;

            for( int i = 1; i < i_prefix; i++ )
                f8_bits += x264_cabac_size_decision2( &ctx, 1 );
            if( i_prefix > 0 && i_prefix < 14 )
                f8_bits += x264_cabac_size_decision2( &ctx, 0 );
            f8_bits += 1 << CABAC_SIZE_BITS; //sign

            x264_cabac_size_unary[i_prefix][i_ctx] = f8_bits;
            x264_cabac_transition_unary[i_prefix][i_ctx] = ctx;
        }
    }
    for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
    {
        int f8_bits = 0;
        uint8_t ctx = i_ctx;

        for( int i = 0; i < 5; i++ )
            f8_bits += x264_cabac_size_decision2( &ctx, 1 );
        f8_bits += 1 << CABAC_SIZE_BITS; //sign

        cabac_size_5ones[i_ctx] = f8_bits;
        cabac_transition_5ones[i_ctx] = ctx;
    }
}

typedef struct
{
    uint64_t score;
    int level_idx; // index into level_tree[]
    uint8_t cabac_state[4]; // just contexts 0,4,8,9 of the 10 relevant to coding abs_level_m1
} trellis_node_t;

typedef struct
{
    uint16_t next;
    uint16_t abs_level;
} trellis_level_t;
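
/* Roughly: level_tree[] entries form per-node singly linked lists.  A node's level_idx
 * points at the most recently decided coefficient (lowest zigzag index so far, since coefs
 * are processed in reverse) and .next links toward the end of the block.  The winning path
 * is read back as in quant_trellis_cabac() below:
 *     int level = bnode->level_idx;
 *     for( i = b_ac; i <= last_nnz; i++ )
 *     {
 *         dct[zigzag[i]] = SIGN( level_tree[level].abs_level, dct[zigzag[i]] );
 *         level = level_tree[level].next;
 *     }
 */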

// TODO:
//  save cabac state between blocks?
//  use trellis' RD score instead of x264_mb_decimate_score?
//  code 8x8 sig/last flags forwards with deadzone and save the contexts at
//  each position?
//  change weights when using CQMs?

// possible optimizations:
//  make scores fit in 32bit
//  save quantized coefs during rd, to avoid a duplicate trellis in the final encode
//  if trellising all MBRD modes, finish SSD calculation so we can skip all of
//  the normal dequant/idct/ssd/cabac

// the unquant_mf here is not the same as dequant_mf:
// in normal operation (dct->quant->dequant->idct) the dct and idct are not
// normalized. quant/dequant absorb those scaling factors.
// in this function, we just do (quant->unquant) and want the output to be
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
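
// i.e. the reconstruction used for distortion inside the trellis is simply
//     unquant_abs_level = (unquant_mf[zigzag[i]] * abs_level + 128) >> 8;
//     d = abs_coef - unquant_abs_level;   // weighted d*d is the SSD contribution
// (the DC paths use unquant_mf[0], doubled), so no idct is needed here.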

#define SIGN(x,y) ((x^(y >> 31))-(y >> 31))

#define SET_LEVEL(ndst, nsrc, l) {\
    if( sizeof(trellis_level_t) == sizeof(uint32_t) )\
        M32( &level_tree[levels_used] ) = pack16to32( nsrc.level_idx, l );\
    else\
        level_tree[levels_used] = (trellis_level_t){ nsrc.level_idx, l };\
    ndst.level_idx = levels_used;\
    levels_used++;\
}

// encode all values of the dc coef in a block which is known to have no ac
static NOINLINE
int trellis_dc_shortcut( int sign_coef, int quant_coef, int unquant_mf, int coef_weight, int lambda2, uint8_t *cabac_state, int cost_sig )
{
    uint64_t bscore = TRELLIS_SCORE_MAX;
    int ret = 0;
    int q = abs( quant_coef );
    for( int abs_level = q-1; abs_level <= q; abs_level++ )
    {
        int unquant_abs_level = (unquant_mf * abs_level + 128) >> 8;

        /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */
        int d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);
        uint64_t score = (uint64_t)d*d * coef_weight;

        /* code the proposed level, and count how much entropy it would take */
        if( abs_level )
        {
            unsigned f8_bits = cost_sig;
            int prefix = X264_MIN( abs_level - 1, 14 );
            f8_bits += x264_cabac_size_decision_noup2( cabac_state+1, prefix > 0 );
            f8_bits += x264_cabac_size_unary[prefix][cabac_state[5]];
            if( abs_level >= 15 )
                f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
            score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
        }

        COPY2_IF_LT( bscore, score, ret, abs_level );
    }
    return SIGN(ret, sign_coef);
}

// encode one value of one coef in one context
static ALWAYS_INLINE
int trellis_coef( int j, int const_level, int abs_level, int prefix, int suffix_cost,
                  int node_ctx, int level1_ctx, int levelgt1_ctx, uint64_t ssd, int cost_siglast[3],
                  trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                  trellis_level_t *level_tree, int levels_used, int lambda2, uint8_t *level_state )
{
    uint64_t score = nodes_prev[j].score + ssd;
    /* code the proposed level, and count how much entropy it would take */
    unsigned f8_bits = cost_siglast[ j ? 1 : 2 ];
    uint8_t level1_state = (j >= 3) ? nodes_prev[j].cabac_state[level1_ctx>>2] : level_state[level1_ctx];
    f8_bits += x264_cabac_entropy[level1_state ^ (const_level > 1)];
    uint8_t levelgt1_state;
    if( const_level > 1 )
    {
        levelgt1_state = j >= 6 ? nodes_prev[j].cabac_state[levelgt1_ctx-6] : level_state[levelgt1_ctx];
        f8_bits += x264_cabac_size_unary[prefix][levelgt1_state] + suffix_cost;
    }
    else
        f8_bits += 1 << CABAC_SIZE_BITS;
    score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );

    /* save the node if it's better than any existing node with the same cabac ctx */
    if( score < nodes_cur[node_ctx].score )
    {
        nodes_cur[node_ctx].score = score;
        if( j == 2 || (j <= 3 && node_ctx == 4) ) // init from input state
            M32(nodes_cur[node_ctx].cabac_state) = M32(level_state+12);
        else if( j >= 3 )
            M32(nodes_cur[node_ctx].cabac_state) = M32(nodes_prev[j].cabac_state);
        if( j >= 3 ) // skip the transition if we're not going to reuse the context
            nodes_cur[node_ctx].cabac_state[level1_ctx>>2] = x264_cabac_transition[level1_state][const_level > 1];
        if( const_level > 1 && node_ctx == 7 )
            nodes_cur[node_ctx].cabac_state[levelgt1_ctx-6] = x264_cabac_transition_unary[prefix][levelgt1_state];
        nodes_cur[node_ctx].level_idx = nodes_prev[j].level_idx;
        SET_LEVEL( nodes_cur[node_ctx], nodes_prev[j], abs_level );
    }
    return levels_used;
}

// encode one value of one coef in all contexts, templated by which value that is.
// in ctx_lo, the set of live nodes is contiguous and starts at ctx0, so return as soon as we've seen one failure.
// in ctx_hi, they're contiguous within each block of 4 ctxs, but not necessarily starting at the beginning,
// so exploiting that would be more complicated.
static NOINLINE
int trellis_coef0_0( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    nodes_cur[0].score = nodes_prev[0].score + ssd0;
    nodes_cur[0].level_idx = nodes_prev[0].level_idx;
    for( int j = 1; j < 4 && (int64_t)nodes_prev[j].score >= 0; j++ )
    {
        nodes_cur[j].score = nodes_prev[j].score;
        if( j >= 3 )
            M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
        SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
    }
    return levels_used;
}

static NOINLINE
int trellis_coef0_1( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    for( int j = 1; j < 8; j++ )
        // this branch only affects speed, not function; there's nothing wrong with updating invalid nodes in coef0.
        if( (int64_t)nodes_prev[j].score >= 0 )
        {
            nodes_cur[j].score = nodes_prev[j].score;
            if( j >= 3 )
                M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
            SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
        }
    return levels_used;
}

#define COEF(const_level, ctx_hi, j, ...)\
    if( !j || (int64_t)nodes_prev[j].score >= 0 )\
        levels_used = trellis_coef( j, const_level, abs_level, prefix, suffix_cost, __VA_ARGS__,\
                                    j?ssd1:ssd0, cost_siglast, nodes_cur, nodes_prev,\
                                    level_tree, levels_used, lambda2, level_state );\
    else if( !ctx_hi )\
        return levels_used;

static NOINLINE
int trellis_coef1_0( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 0, 0, 1, 1, 0 );
    COEF( 1, 0, 1, 2, 2, 0 );
    COEF( 1, 0, 2, 3, 3, 0 );
    COEF( 1, 0, 3, 3, 4, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coef1_1( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 1, 1, 2, 2, 0 );
    COEF( 1, 1, 2, 3, 3, 0 );
    COEF( 1, 1, 3, 3, 4, 0 );
    COEF( 1, 1, 4, 4, 0, 0 );
    COEF( 1, 1, 5, 5, 0, 0 );
    COEF( 1, 1, 6, 6, 0, 0 );
    COEF( 1, 1, 7, 7, 0, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_0( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 0, 0, 4, 1, 5 );
    COEF( 2, 0, 1, 4, 2, 5 );
    COEF( 2, 0, 2, 4, 3, 5 );
    COEF( 2, 0, 3, 4, 4, 5 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_1( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 1, 1, 4, 2, 5 );
    COEF( 2, 1, 2, 4, 3, 5 );
    COEF( 2, 1, 3, 4, 4, 5 );
    COEF( 2, 1, 4, 5, 0, 6 );
    COEF( 2, 1, 5, 6, 0, 7 );
    COEF( 2, 1, 6, 7, 0, 8 );
    COEF( 2, 1, 7, 7, 0, levelgt1_ctx );
    return levels_used;
}

static ALWAYS_INLINE
int quant_trellis_cabac( x264_t *h, dctcoef *dct,
                         udctcoef *quant_mf, udctcoef *quant_bias, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx )
{
    ALIGNED_ARRAY_N( dctcoef, orig_coefs, [64] );
    ALIGNED_ARRAY_N( dctcoef, quant_coefs, [64] );
    const uint32_t *coef_weight1 = num_coefs == 64 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = num_coefs == 64 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    const int b_interlaced = MB_INTERLACED;
    uint8_t *cabac_state_sig = &h->cabac.state[ x264_significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    uint8_t *cabac_state_last = &h->cabac.state[ x264_last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    int levelgt1_ctx = b_chroma && dc ? 8 : 9;

    if( dc )
    {
        if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4_dc( dct, quant_mf[0] >> 1, quant_bias[0] << 1 ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
        else
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*num_coefs );
            int nz = h->quantf.quant_2x2_dc( &dct[0], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( num_coefs == 8 )
                nz |= h->quantf.quant_2x2_dc( &dct[4], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( !nz )
                return 0;
            for( int i = 0; i < num_coefs; i++ )
                quant_coefs[i] = dct[zigzag[i]];
        }
    }
    else
    {
        if( num_coefs == 64 )
        {
            h->mc.memcpy_aligned( orig_coefs, dct, sizeof(dctcoef)*64 );
            if( !h->quantf.quant_8x8( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_8x8( quant_coefs, dct );
        }
        else //if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
    }

    int last_nnz = h->quantf.coeff_last[ctx_block_cat]( quant_coefs+b_ac )+b_ac;
    uint8_t *cabac_state = &h->cabac.state[ x264_coeff_abs_level_m1_offset[ctx_block_cat] ];

    /* shortcut for dc-only blocks.
     * this doesn't affect the output, but saves some unnecessary computation. */
    if( last_nnz == 0 && !dc )
    {
        int cost_sig = x264_cabac_size_decision_noup2( &cabac_state_sig[0], 1 )
                     + x264_cabac_size_decision_noup2( &cabac_state_last[0], 1 );
        dct[0] = trellis_dc_shortcut( orig_coefs[0], quant_coefs[0], unquant_mf[0], coef_weight2[0], lambda2, cabac_state, cost_sig );
        return !!dct[0];
    }

#if HAVE_MMX && ARCH_X86_64
#define TRELLIS_ARGS unquant_mf, zigzag, lambda2, last_nnz, orig_coefs, quant_coefs, dct,\
                     cabac_state_sig, cabac_state_last, M64(cabac_state), M16(cabac_state+8)
    if( num_coefs == 16 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_4x4( TRELLIS_ARGS, b_ac );
        else
            return h->quantf.trellis_cabac_4x4_psy( TRELLIS_ARGS, b_ac, h->mb.pic.fenc_dct4[idx&15], h->mb.i_psy_trellis );
    else if( num_coefs == 64 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_8x8( TRELLIS_ARGS, b_interlaced );
        else
            return h->quantf.trellis_cabac_8x8_psy( TRELLIS_ARGS, b_interlaced, h->mb.pic.fenc_dct8[idx&3], h->mb.i_psy_trellis);
    else if( num_coefs == 8 && dc )
        return h->quantf.trellis_cabac_chroma_422_dc( TRELLIS_ARGS );
    else if( dc )
        return h->quantf.trellis_cabac_dc( TRELLIS_ARGS, num_coefs-1 );
#endif

    // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
    // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
    // but it takes more time to remove dead states than you gain in reduced memory.
    trellis_level_t level_tree[64*8*2];
    int levels_used = 1;
    /* init trellis */
    trellis_node_t nodes[2][8];
    trellis_node_t *nodes_cur = nodes[0];
    trellis_node_t *nodes_prev = nodes[1];
    trellis_node_t *bnode;
    for( int j = 1; j < 4; j++ )
        nodes_cur[j].score = TRELLIS_SCORE_MAX;
    nodes_cur[0].score = TRELLIS_SCORE_BIAS;
    nodes_cur[0].level_idx = 0;
    level_tree[0].abs_level = 0;
    level_tree[0].next = 0;
    ALIGNED_4( uint8_t level_state[16] );
    memcpy( level_state, cabac_state, 10 );
    level_state[12] = cabac_state[0]; // packed subset for copying into trellis_node_t
    level_state[13] = cabac_state[4];
    level_state[14] = cabac_state[8];
    level_state[15] = cabac_state[9];

    idx &= num_coefs == 64 ? 3 : 15;

    // coefs are processed in reverse order, because that's how the abs value is coded.
    // last_coef and significant_coef flags are normally coded in forward order, but
    // we have to reverse them to match the levels.
    // in 4x4 blocks, last_coef and significant_coef use a separate context for each
    // position, so the order doesn't matter, and we don't even have to update their contexts.
    // in 8x8 blocks, some positions share contexts, so we'll just have to hope that
    // cabac isn't too sensitive.
    int i = last_nnz;
#define TRELLIS_LOOP(ctx_hi)\
    for( ; i >= b_ac; i-- )\
    {\
        /* skip 0s: this doesn't affect the output, but saves some unnecessary computation. */\
        if( !quant_coefs[i] )\
        {\
            /* no need to calculate ssd of 0s: it's the same in all nodes.\
             * no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.
             * subtracting from one score is equivalent to adding to the rest. */\
            if( !ctx_hi )\
            {\
                int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                               b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
                uint64_t cost_sig0 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 )\
                                   * (uint64_t)lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
                nodes_cur[0].score -= cost_sig0;\
            }\
            for( int j = 1; j < (ctx_hi?8:4); j++ )\
                SET_LEVEL( nodes_cur[j], nodes_cur[j], 0 );\
            continue;\
        }\
\
        int sign_coef = orig_coefs[zigzag[i]];\
        int abs_coef = abs( sign_coef );\
        int q = abs( quant_coefs[i] );\
        int cost_siglast[3]; /* { zero, nonzero, nonzero-and-last } */\
        XCHG( trellis_node_t*, nodes_cur, nodes_prev );\
        for( int j = ctx_hi; j < 8; j++ )\
            nodes_cur[j].score = TRELLIS_SCORE_MAX;\
\
        if( i < num_coefs-1 || ctx_hi )\
        {\
            int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                           b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            int lastindex = !dc && num_coefs == 64 ? x264_last_coeff_flag_offset_8x8[i] :\
                            b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            cost_siglast[0] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 );\
            int cost_sig1 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 1 );\
            cost_siglast[1] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 0 ) + cost_sig1;\
            if( !ctx_hi )\
                cost_siglast[2] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 1 ) + cost_sig1;\
        }\
        else\
        {\
            cost_siglast[0] = cost_siglast[1] = cost_siglast[2] = 0;\
        }\
\
        /* there are a few cases where increasing the coeff magnitude helps,\
         * but it's only around .003 dB, and skipping them ~doubles the speed of trellis.\
         * could also try q-2: that sometimes helps, but also sometimes decimates blocks\
         * that are better left coded, especially at QP > 40. */\
        uint64_t ssd0[2], ssd1[2];\
        for( int k = 0; k < 2; k++ )\
        {\
            int abs_level = q-1+k;\
            int unquant_abs_level = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[i]]) * abs_level + 128) >> 8);\
            int d = abs_coef - unquant_abs_level;\
            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */\
            if( h->mb.i_psy_trellis && i && !dc && !b_chroma )\
            {\
                int orig_coef = (num_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];\
                int predicted_coef = orig_coef - sign_coef;\
                int psy_value = abs(unquant_abs_level + SIGN(predicted_coef, sign_coef));\
                int psy_weight = coef_weight1[zigzag[i]] * h->mb.i_psy_trellis;\
                ssd1[k] = (uint64_t)d*d * coef_weight2[zigzag[i]] - psy_weight * psy_value;\
            }\
            else\
                /* FIXME: for i16x16 dc is this weight optimal? */\
                ssd1[k] = (uint64_t)d*d * (dc?256:coef_weight2[zigzag[i]]);\
            ssd0[k] = ssd1[k];\
            if( !i && !dc && !ctx_hi )\
            {\
                /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */\
                d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);\
                ssd0[k] = (uint64_t)d*d * coef_weight2[zigzag[i]];\
            }\
        }\
\
        /* argument passing imposes some significant overhead here. gcc's interprocedural register allocation isn't up to it. */\
        switch( q )\
        {\
        case 1:\
            ssd1[0] += (uint64_t)cost_siglast[0] * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
            levels_used = trellis_coef0_##ctx_hi( ssd0[0]-ssd1[0], nodes_cur, nodes_prev, level_tree, levels_used );\
            levels_used = trellis_coef1_##ctx_hi( ssd0[1]-ssd1[0], ssd1[1]-ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
            goto next##ctx_hi;\
        case 2:\
            levels_used = trellis_coef1_##ctx_hi( ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
            levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
            goto next1;\
        default:\
            levels_used = trellis_coefn_##ctx_hi( q-1, ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
            levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
            goto next1;\
        }\
        next##ctx_hi:;\
    }\
    /* output levels from the best path through the trellis */\
    bnode = &nodes_cur[ctx_hi];\
    for( int j = ctx_hi+1; j < (ctx_hi?8:4); j++ )\
        if( nodes_cur[j].score < bnode->score )\
            bnode = &nodes_cur[j];

    // keep 2 versions of the main quantization loop, depending on which subsets of the node_ctxs are live
    // node_ctx 0..3, i.e. having not yet encountered any coefs that might be quantized to >1
    TRELLIS_LOOP(0);

    if( bnode == &nodes_cur[0] )
    {
        /* We only need to zero an empty 4x4 block. 8x8 can be
           implicitly emptied via zero nnz, as can dc. */
        if( num_coefs == 16 && !dc )
            memset( dct, 0, 16 * sizeof(dctcoef) );
        return 0;
    }

    if(0) // accessible only by goto, not fallthrough
    {
        // node_ctx 1..7 (ctx0 ruled out because we never try both level0 and level2+ on the same coef)
        TRELLIS_LOOP(1);
    }

    int level = bnode->level_idx;
    for( i = b_ac; i <= last_nnz; i++ )
    {
        dct[zigzag[i]] = SIGN(level_tree[level].abs_level, dct[zigzag[i]]);
        level = level_tree[level].next;
    }

    return 1;
}

/* FIXME: This is a gigantic hack. See below.
 *
 * CAVLC is much more difficult to trellis than CABAC.
 *
 * CABAC has only three states to track: significance map, last, and the
 * level state machine.
 * CAVLC, by comparison, has five: coeff_token (trailing + total),
 * total_zeroes, zero_run, and the level state machine.
 *
 * I know of no paper that has managed to design a close-to-optimal trellis
 * that covers all five of these and isn't exponential-time. As a result, this
 * "trellis" isn't: it's just a QNS search. Patches welcome for something better.
 * It's actually surprisingly fast, albeit not quite optimal. It's pretty close
 * though; since CAVLC only has 2^16 possible rounding modes (assuming only two
 * roundings as options), a bruteforce search is feasible. Testing shows
 * that this QNS is reasonably close to optimal in terms of compression.
 *
 * TODO:
 *  Don't bother changing large coefficients when it wouldn't affect bit cost
 *  (e.g. only affecting bypassed suffix bits).
 *  Don't re-run all parts of CAVLC bit cost calculation when not necessary.
 *  e.g. when changing a coefficient from one non-zero value to another in
 *  such a way that trailing ones and suffix length isn't affected. */
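
/* In outline, quant_trellis_cavlc() below proceeds as follows:
 *   1. quantize each coefficient two ways: nearest rounding and nearest-1 ("round down");
 *   2. pick a deadzone-style initial choice and compute its RD cost by running the real
 *      CAVLC block writer in size-only mode;
 *   3. repeatedly try toggling one coefficient between its two candidates, re-counting the
 *      block's bits each time, and commit the single toggle that lowers RD cost the most;
 *   4. stop when no single toggle helps (each coefficient is toggled at most once). */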
static ALWAYS_INLINE
int quant_trellis_cavlc( x264_t *h, dctcoef *dct,
                         const udctcoef *quant_mf, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx, int b_8x8 )
{
    ALIGNED_16( dctcoef quant_coefs[2][16] );
    ALIGNED_16( dctcoef coefs[16] ) = {0};
    const uint32_t *coef_weight1 = b_8x8 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = b_8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    int delta_distortion[16];
    int64_t score = 1ULL<<62;
    int i, j;
    const int f = 1<<15;
    int nC = b_chroma && dc ? 3 + (num_coefs>>2)
                            : ct_index[x264_mb_predict_non_zero_code( h, !b_chroma && dc ? (idx - LUMA_DC)*16 : idx )];

    /* Code for handling 8x8dct -> 4x4dct CAVLC munging. Input/output use a different
     * step/start/end than internal processing. */
    int step = 1;
    int start = b_ac;
    int end = num_coefs - 1;
    if( b_8x8 )
    {
        start = idx&3;
        end = 60 + start;
        step = 4;
    }
    idx &= 15;

    lambda2 <<= LAMBDA_BITS;

    /* Find last non-zero coefficient. */
    for( i = end; i >= start; i -= step )
        if( (unsigned)(dct[zigzag[i]] * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) + f-1) >= 2*f )
            break;

    if( i < start )
        goto zeroblock;

    /* Prepare for QNS search: calculate distortion caused by each DCT coefficient
     * rounding to be searched.
     *
     * We only search two roundings (nearest and nearest-1) like in CABAC trellis,
     * so we just store the difference in distortion between them. */
    int last_nnz = b_8x8 ? i >> 2 : i;
    int coef_mask = 0;
    int round_mask = 0;
    for( i = b_ac, j = start; i <= last_nnz; i++, j += step )
    {
        int coef = dct[zigzag[j]];
        int abs_coef = abs(coef);
        int sign = coef < 0 ? -1 : 1;
        int nearest_quant = ( f + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
        quant_coefs[1][i] = quant_coefs[0][i] = sign * nearest_quant;
        coefs[i] = quant_coefs[1][i];
        if( nearest_quant )
        {
            /* We initialize the trellis with a deadzone halfway between nearest rounding
             * and always-round-down. This gives much better results than initializing to either
             * extreme.
             * FIXME: should we initialize to the deadzones used by deadzone quant? */
            int deadzone_quant = ( f/2 + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
            int unquant1 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-0) + 128) >> 8);
            int unquant0 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-1) + 128) >> 8);
            int d1 = abs_coef - unquant1;
            int d0 = abs_coef - unquant0;
            delta_distortion[i] = (d0*d0 - d1*d1) * (dc?256:coef_weight2[zigzag[j]]);

            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
            if( h->mb.i_psy_trellis && j && !dc && !b_chroma )
            {
                int orig_coef = b_8x8 ? h->mb.pic.fenc_dct8[idx>>2][zigzag[j]] : h->mb.pic.fenc_dct4[idx][zigzag[j]];
                int predicted_coef = orig_coef - coef;
                int psy_weight = coef_weight1[zigzag[j]];
                int psy_value0 = h->mb.i_psy_trellis * abs(predicted_coef + unquant0 * sign);
                int psy_value1 = h->mb.i_psy_trellis * abs(predicted_coef + unquant1 * sign);
                delta_distortion[i] += (psy_value0 - psy_value1) * psy_weight;
            }

            quant_coefs[0][i] = sign * (nearest_quant-1);
            if( deadzone_quant != nearest_quant )
                coefs[i] = quant_coefs[0][i];
            else
                round_mask |= 1 << i;
        }
        else
            delta_distortion[i] = 0;
        coef_mask |= (!!coefs[i]) << i;
    }

    /* Calculate the cost of the starting state. */
    h->out.bs.i_bits_encoded = 0;
    if( !coef_mask )
        bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
    else
        x264_cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
    score = (int64_t)h->out.bs.i_bits_encoded * lambda2;

    /* QNS loop: pick the change that improves RD the most, apply it, repeat.
     * coef_mask and round_mask are used to simplify tracking of nonzeroness
     * and rounding modes chosen. */
    while( 1 )
    {
        int64_t iter_score = score;
        int iter_distortion_delta = 0;
        int iter_coef = -1;
        int iter_mask = coef_mask;
        int iter_round = round_mask;
        for( i = b_ac; i <= last_nnz; i++ )
        {
            if( !delta_distortion[i] )
                continue;

            /* Set up all the variables for this iteration. */
            int cur_round = round_mask ^ (1 << i);
            int round_change = (cur_round >> i)&1;
            int old_coef = coefs[i];
            int new_coef = quant_coefs[round_change][i];
            int cur_mask = (coef_mask&~(1 << i))|(!!new_coef << i);
            int cur_distortion_delta = delta_distortion[i] * (round_change ? -1 : 1);
            int64_t cur_score = cur_distortion_delta;
            coefs[i] = new_coef;

            /* Count up bits. */
            h->out.bs.i_bits_encoded = 0;
            if( !cur_mask )
                bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
            else
                x264_cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
            cur_score += (int64_t)h->out.bs.i_bits_encoded * lambda2;

            coefs[i] = old_coef;
            if( cur_score < iter_score )
            {
                iter_score = cur_score;
                iter_coef = i;
                iter_mask = cur_mask;
                iter_round = cur_round;
                iter_distortion_delta = cur_distortion_delta;
            }
        }
        if( iter_coef >= 0 )
        {
            score = iter_score - iter_distortion_delta;
            coef_mask = iter_mask;
            round_mask = iter_round;
            coefs[iter_coef] = quant_coefs[((round_mask >> iter_coef)&1)][iter_coef];
            /* Don't try adjusting coefficients we've already adjusted.
             * Testing suggests this doesn't hurt results -- and sometimes actually helps. */
            delta_distortion[iter_coef] = 0;
        }
        else
            break;
    }

    if( coef_mask )
    {
        for( i = b_ac, j = start; i < num_coefs; i++, j += step )
            dct[zigzag[j]] = coefs[i];
        return 1;
    }

zeroblock:
    if( !dc )
    {
        if( b_8x8 )
            for( i = start; i <= end; i+=step )
                dct[zigzag[i]] = 0;
        else
            memset( dct, 0, 16*sizeof(dctcoef) );
    }
    return 0;
}

int x264_quant_luma_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat, int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
        DCT_LUMA_DC, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx, 0 );
}

static const uint8_t x264_zigzag_scan2x2[4] = { 0, 1, 2, 3 };
static const uint8_t x264_zigzag_scan2x4[8] = { 0, 2, 1, 4, 6, 3, 5, 7 };

int x264_quant_chroma_dc_trellis( x264_t *h, dctcoef *dct, int i_qp, int b_intra, int idx )
{
    const uint8_t *zigzag;
    int num_coefs;
    int quant_cat = CQM_4IC+1 - b_intra;

    if( CHROMA_FORMAT == CHROMA_422 )
    {
        zigzag = x264_zigzag_scan2x4;
        num_coefs = 8;
    }
    else
    {
        zigzag = x264_zigzag_scan2x2;
        num_coefs = 4;
    }

    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[quant_cat][i_qp], h->quant4_bias0[quant_cat][i_qp],
            h->unquant4_mf[quant_cat][i_qp], zigzag,
            DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[quant_cat][i_qp], h->unquant4_mf[quant_cat][i_qp], zigzag,
        DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx, 0 );
}

int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};
    int b_ac = ctx_ac[ctx_block_cat];
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
        x264_zigzag_scan4[MB_INTERLACED],
        ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx, 0 );
}

int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    if( h->param.b_cabac )
    {
        return quant_trellis_cabac( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias0[i_quant_cat][i_qp],
            h->unquant8_mf[i_quant_cat][i_qp], x264_zigzag_scan8[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 64, idx );
    }

    /* 8x8 CAVLC is split into 4 4x4 blocks */
    int nzaccum = 0;
    for( int i = 0; i < 4; i++ )
    {
        int nz = quant_trellis_cavlc( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
            x264_zigzag_scan8[MB_INTERLACED],
            DCT_LUMA_4x4, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 16, idx*4+i, 1 );
        /* Set up nonzero count for future calls */
        h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
        nzaccum |= nz;
    }
    STORE_8x8_NNZ( 0, idx, 0 );
    return nzaccum;
}