/*****************************************************************************
 * checkasm.c: assembly check tool
 *****************************************************************************
 * Copyright (C) 2003-2016 x264 project
 *
 * Authors: Loren Merritt <[email protected]>
 *          Laurent Aimar <[email protected]>
 *          Fiona Glaser <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at [email protected].
 *****************************************************************************/

#include <ctype.h>
#include "common/common.h"
#include "common/cpu.h"

// GCC doesn't align stack variables on ARM, so use .bss
#if ARCH_ARM
#undef ALIGNED_16
#define ALIGNED_16( var ) DECLARE_ALIGNED( static var, 16 )
#endif

/* buf1, buf2: initialised to random data; tests must not write to them */
uint8_t *buf1, *buf2;
/* buf3, buf4: used to store output */
uint8_t *buf3, *buf4;
/* pbuf1, pbuf2: initialised to random pixel data; tests must not write to them */
pixel *pbuf1, *pbuf2;
/* pbuf3, pbuf4: point to buf3, buf4, just for type convenience */
pixel *pbuf3, *pbuf4;

int quiet = 0;

#define report( name ) { \
    if( used_asm && !quiet ) \
        fprintf( stderr, " - %-21s [%s]\n", name, ok ? "OK" : "FAILED" ); \
    if( !ok ) ret = -1; \
}

#define BENCH_RUNS 100   // tradeoff between accuracy and speed
#define BENCH_ALIGNS 16  // number of stack+heap data alignments (another accuracy vs speed tradeoff)
#define MAX_FUNCS 1000   // just has to be big enough to hold all the existing functions
#define MAX_CPUS 30      // number of different combinations of cpu flags

typedef struct
{
    void *pointer; // just for detecting duplicates
    uint32_t cpu;
    uint64_t cycles;
    uint32_t den;
} bench_t;

typedef struct
{
    char *name;
    bench_t vers[MAX_CPUS];
} bench_func_t;

int do_bench = 0;
int bench_pattern_len = 0;
const char *bench_pattern = "";
char func_name[100];
static bench_func_t benchs[MAX_FUNCS];

static const char *pixel_names[12] = { "16x16", "16x8", "8x16", "8x8", "8x4", "4x8", "4x4", "4x16", "4x2", "2x8", "2x4", "2x2" };
static const char *intra_predict_16x16_names[7] = { "v", "h", "dc", "p", "dcl", "dct", "dc8" };
static const char *intra_predict_8x8c_names[7] = { "dc", "h", "v", "p", "dcl", "dct", "dc8" };
static const char *intra_predict_4x4_names[12] = { "v", "h", "dc", "ddl", "ddr", "vr", "hd", "vl", "hu", "dcl", "dct", "dc8" };
static const char **intra_predict_8x8_names = intra_predict_4x4_names;
static const char **intra_predict_8x16c_names = intra_predict_8x8c_names;

#define set_func_name(...) snprintf( func_name, sizeof(func_name), __VA_ARGS__ )

static inline uint32_t read_time(void)
{
    uint32_t a = 0;
#if HAVE_X86_INLINE_ASM
    asm volatile( "lfence \n"
                  "rdtsc \n"
                  : "=a"(a) :: "edx", "memory" );
#elif ARCH_PPC
    asm volatile( "mftb %0" : "=r"(a) :: "memory" );
#elif ARCH_ARM // ARMv7 only
    asm volatile( "mrc p15, 0, %0, c9, c13, 0" : "=r"(a) :: "memory" );
#elif ARCH_AARCH64
    uint64_t b = 0;
    asm volatile( "mrs %0, pmccntr_el0" : "=r"(b) :: "memory" );
    a = b;
#elif ARCH_MIPS
    asm volatile( "rdhwr %0, $2" : "=r"(a) :: "memory" );
#endif
    return a;
}
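/* Editor's note (hedged): lfence serialises rdtsc so earlier instructions
 * can't leak into the measured region; the other arches read their cycle /
 * timebase counters directly (PMCCNTR on ARM/AArch64, which typically needs
 * user-space access enabled by the kernel, otherwise these reads fault). */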

static bench_t* get_bench( const char *name, int cpu )
{
    int i, j;
    for( i = 0; benchs[i].name && strcmp(name, benchs[i].name); i++ )
        assert( i < MAX_FUNCS );
    if( !benchs[i].name )
        benchs[i].name = strdup( name );
    if( !cpu )
        return &benchs[i].vers[0];
    for( j = 1; benchs[i].vers[j].cpu && benchs[i].vers[j].cpu != cpu; j++ )
        assert( j < MAX_CPUS );
    benchs[i].vers[j].cpu = cpu;
    return &benchs[i].vers[j];
}

static int cmp_nop( const void *a, const void *b )
{
    return *(uint16_t*)a - *(uint16_t*)b;
}

static int cmp_bench( const void *a, const void *b )
{
    // asciibetical sort except preserving numbers
    const char *sa = ((bench_func_t*)a)->name;
    const char *sb = ((bench_func_t*)b)->name;
    for( ;; sa++, sb++ )
    {
        if( !*sa && !*sb )
            return 0;
        if( isdigit( *sa ) && isdigit( *sb ) && isdigit( sa[1] ) != isdigit( sb[1] ) )
            return isdigit( sa[1] ) - isdigit( sb[1] );
        if( *sa != *sb )
            return *sa - *sb;
    }
}
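/* The digit-lookahead makes shorter numbers sort first, so e.g. "sad_8x8"
 * orders before "sad_16x16" even though '1' < '8' asciibetically. */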

static void print_bench(void)
{
    uint16_t nops[10000];
    int nfuncs, nop_time=0;

    for( int i = 0; i < 10000; i++ )
    {
        uint32_t t = read_time();
        nops[i] = read_time() - t;
    }
    qsort( nops, 10000, sizeof(uint16_t), cmp_nop );
    for( int i = 500; i < 9500; i++ )
        nop_time += nops[i];
    nop_time /= 900;
    printf( "nop: %d\n", nop_time );

    for( nfuncs = 0; nfuncs < MAX_FUNCS && benchs[nfuncs].name; nfuncs++ );
    qsort( benchs, nfuncs, sizeof(bench_func_t), cmp_bench );
    for( int i = 0; i < nfuncs; i++ )
        for( int j = 0; j < MAX_CPUS && (!j || benchs[i].vers[j].cpu); j++ )
        {
            int k;
            bench_t *b = &benchs[i].vers[j];
            if( !b->den )
                continue;
            for( k = 0; k < j && benchs[i].vers[k].pointer != b->pointer; k++ );
            if( k < j )
                continue;
            printf( "%s_%s%s: %"PRId64"\n", benchs[i].name,
#if HAVE_MMX
                    b->cpu&X264_CPU_AVX2 ? "avx2" :
                    b->cpu&X264_CPU_FMA3 ? "fma3" :
                    b->cpu&X264_CPU_FMA4 ? "fma4" :
                    b->cpu&X264_CPU_XOP ? "xop" :
                    b->cpu&X264_CPU_AVX ? "avx" :
                    b->cpu&X264_CPU_SSE42 ? "sse42" :
                    b->cpu&X264_CPU_SSE4 ? "sse4" :
                    b->cpu&X264_CPU_SSSE3 ? "ssse3" :
                    b->cpu&X264_CPU_SSE3 ? "sse3" :
                    /* print sse2slow only if there's also a sse2fast version of the same func */
                    b->cpu&X264_CPU_SSE2_IS_SLOW && j<MAX_CPUS-1 && b[1].cpu&X264_CPU_SSE2_IS_FAST && !(b[1].cpu&X264_CPU_SSE3) ? "sse2slow" :
                    b->cpu&X264_CPU_SSE2 ? "sse2" :
                    b->cpu&X264_CPU_SSE ? "sse" :
                    b->cpu&X264_CPU_MMX ? "mmx" :
#elif ARCH_PPC
                    b->cpu&X264_CPU_ALTIVEC ? "altivec" :
#elif ARCH_ARM
                    b->cpu&X264_CPU_NEON ? "neon" :
                    b->cpu&X264_CPU_ARMV6 ? "armv6" :
#elif ARCH_AARCH64
                    b->cpu&X264_CPU_NEON ? "neon" :
                    b->cpu&X264_CPU_ARMV8 ? "armv8" :
#elif ARCH_MIPS
                    b->cpu&X264_CPU_MSA ? "msa" :
#endif
                    "c",
#if HAVE_MMX
                    b->cpu&X264_CPU_CACHELINE_32 ? "_c32" :
                    b->cpu&X264_CPU_SLOW_ATOM && b->cpu&X264_CPU_CACHELINE_64 ? "_c64_atom" :
                    b->cpu&X264_CPU_CACHELINE_64 ? "_c64" :
                    b->cpu&X264_CPU_SLOW_SHUFFLE ? "_slowshuffle" :
                    b->cpu&X264_CPU_LZCNT ? "_lzcnt" :
                    b->cpu&X264_CPU_BMI2 ? "_bmi2" :
                    b->cpu&X264_CPU_BMI1 ? "_bmi1" :
                    b->cpu&X264_CPU_SLOW_CTZ ? "_slow_ctz" :
                    b->cpu&X264_CPU_SLOW_ATOM ? "_atom" :
#elif ARCH_ARM
                    b->cpu&X264_CPU_FAST_NEON_MRC ? "_fast_mrc" :
#endif
                    "",
                    (int64_t)(10*b->cycles/b->den - nop_time)/4 );
        }
}
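/* Normalisation sketch: nop_time sums the middle 9000 of 10000 back-to-back
 * read_time() deltas and divides by 900, i.e. a trimmed mean that works out
 * to 10x the timer overhead. Each bench sample times 4 unrolled calls, so
 * 10*cycles/den is 10x the cost of 4 calls plus overhead; subtracting
 * nop_time and dividing by 4 prints a value in tenths of a tick per call. */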

#if ARCH_X86 || ARCH_X86_64
int x264_stack_pagealign( int (*func)(), int align );

/* detect when callee-saved regs aren't saved;
 * needs an explicit asm check because it only sometimes crashes in normal use. */
intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
#else
#define x264_stack_pagealign( func, align ) func()
#endif

#if ARCH_AARCH64
intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
#endif

#if ARCH_ARM
intptr_t x264_checkasm_call_neon( intptr_t (*func)(), int *ok, ... );
intptr_t x264_checkasm_call_noneon( intptr_t (*func)(), int *ok, ... );
intptr_t (*x264_checkasm_call)( intptr_t (*func)(), int *ok, ... ) = x264_checkasm_call_noneon;
#endif

#define call_c1(func,...) func(__VA_ARGS__)

#if ARCH_X86_64
/* Evil hack: detect incorrect assumptions that 32-bit ints are zero-extended to 64-bit.
 * This is done by clobbering the stack with junk around the stack pointer and calling the
 * assembly function through x264_checkasm_call with added dummy arguments which forces all
 * real arguments to be passed on the stack and not in registers. For 32-bit arguments the
 * upper half of the 64-bit register location on the stack will now contain junk. Note that
 * this is dependent on compiler behaviour and that interrupts etc. at the wrong time may
 * overwrite the junk written to the stack, so there's no guarantee that it will always
 * detect all functions that assume zero-extension.
 */
void x264_checkasm_stack_clobber( uint64_t clobber, ... );
#define call_a1(func,...) ({ \
    uint64_t r = (rand() & 0xffff) * 0x0001000100010001ULL; \
    x264_checkasm_stack_clobber( r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r ); /* max_args+6 */ \
    x264_checkasm_call(( intptr_t(*)())func, &ok, 0, 0, 0, 0, __VA_ARGS__ ); })
#elif ARCH_X86 || (ARCH_AARCH64 && !defined(__APPLE__)) || ARCH_ARM
#define call_a1(func,...) x264_checkasm_call( (intptr_t(*)())func, &ok, __VA_ARGS__ )
#else
#define call_a1 call_c1
#endif

#if ARCH_ARM
#define call_a1_64(func,...) ((uint64_t (*)(intptr_t(*)(), int*, ...))x264_checkasm_call)( (intptr_t(*)())func, &ok, __VA_ARGS__ )
#else
#define call_a1_64 call_a1
#endif
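/* Illustration (hypothetical, not part of the test suite): a function whose
 * int argument is passed on the stack and which then loads the full 64-bit
 * slot, e.g.
 *     mov  rax, [rsp+40]   ; should have been "mov eax, [rsp+40]"
 *     add  rdi, rax
 * would pick up the 0x0001000100010001-pattern junk planted by
 * x264_checkasm_stack_clobber in the upper 32 bits and compute a bogus
 * pointer, so the latent bug fails loudly here instead of silently. */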

#define call_bench(func,cpu,...)\
    if( do_bench && !strncmp(func_name, bench_pattern, bench_pattern_len) )\
    {\
        uint64_t tsum = 0;\
        int tcount = 0;\
        call_a1(func, __VA_ARGS__);\
        for( int ti = 0; ti < (cpu?BENCH_RUNS:BENCH_RUNS/4); ti++ )\
        {\
            uint32_t t = read_time();\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            func(__VA_ARGS__);\
            t = read_time() - t;\
            if( (uint64_t)t*tcount <= tsum*4 && ti > 0 )\
            {\
                tsum += t;\
                tcount++;\
            }\
        }\
        bench_t *b = get_bench( func_name, cpu );\
        b->cycles += tsum;\
        b->den += tcount;\
        b->pointer = func;\
    }
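/* Each sample times 4 unrolled calls to amortise timer overhead. The
 * t*tcount <= tsum*4 condition accepts a sample only if it is at most 4x
 * the running average, filtering out spikes from interrupts and context
 * switches; the first iteration is always discarded as a warm-up. */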

/* for most functions, run benchmark and correctness test at the same time.
 * for those that modify their inputs, run the above macros separately */
#define call_a(func,...) ({ call_a2(func,__VA_ARGS__); call_a1(func,__VA_ARGS__); })
#define call_c(func,...) ({ call_c2(func,__VA_ARGS__); call_c1(func,__VA_ARGS__); })
#define call_a2(func,...) ({ call_bench(func,cpu_new,__VA_ARGS__); })
#define call_c2(func,...) ({ call_bench(func,0,__VA_ARGS__); })
#define call_a64(func,...) ({ call_a2(func,__VA_ARGS__); call_a1_64(func,__VA_ARGS__); })


static int check_pixel( int cpu_ref, int cpu_new )
{
    x264_pixel_function_t pixel_c;
    x264_pixel_function_t pixel_ref;
    x264_pixel_function_t pixel_asm;
    x264_predict_t predict_4x4[12];
    x264_predict8x8_t predict_8x8[12];
    x264_predict_8x8_filter_t predict_8x8_filter;
    ALIGNED_16( pixel edge[36] );
    uint16_t cost_mv[32];
    int ret = 0, ok, used_asm;

    x264_pixel_init( 0, &pixel_c );
    x264_pixel_init( cpu_ref, &pixel_ref );
    x264_pixel_init( cpu_new, &pixel_asm );
    x264_predict_4x4_init( 0, predict_4x4 );
    x264_predict_8x8_init( 0, predict_8x8, &predict_8x8_filter );
    predict_8x8_filter( pbuf2+40, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );

    // maximize sum
    for( int i = 0; i < 256; i++ )
    {
        int z = i|(i>>4);
        z ^= z>>2;
        z ^= z>>1;
        pbuf4[i] = -(z&1) & PIXEL_MAX;
        pbuf3[i] = ~pbuf4[i] & PIXEL_MAX;
    }
    // random pattern made of maxed pixel differences, in case an intermediate value overflows
    for( int i = 256; i < 0x1000; i++ )
    {
        pbuf4[i] = -(pbuf1[i&~0x88]&1) & PIXEL_MAX;
        pbuf3[i] = ~(pbuf4[i]) & PIXEL_MAX;
    }
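    /* In both patterns every pbuf3/pbuf4 pixel pair differs by PIXEL_MAX
     * (one is 0, the other PIXEL_MAX), which maximises per-pixel sums and
     * stresses any intermediate accumulator that is too narrow; the z
     * parity trick just derives a fixed 0/1 tiling from the index bits. */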

#define TEST_PIXEL( name, align ) \
    ok = 1, used_asm = 0; \
    for( int i = 0; i < ARRAY_ELEMS(pixel_c.name); i++ ) \
    { \
        int res_c, res_asm; \
        if( pixel_asm.name[i] != pixel_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] ); \
            used_asm = 1; \
            for( int j = 0; j < 64; j++ ) \
            { \
                res_c = call_c( pixel_c.name[i], pbuf1, (intptr_t)16, pbuf2+j*!align, (intptr_t)64 ); \
                res_asm = call_a( pixel_asm.name[i], pbuf1, (intptr_t)16, pbuf2+j*!align, (intptr_t)64 ); \
                if( res_c != res_asm ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: %d != %d [FAILED]\n", i, res_c, res_asm ); \
                    break; \
                } \
            } \
            for( int j = 0; j < 0x1000 && ok; j += 256 ) \
            { \
                res_c = pixel_c.name[i]( pbuf3+j, 16, pbuf4+j, 16 ); \
                res_asm = pixel_asm.name[i]( pbuf3+j, 16, pbuf4+j, 16 ); \
                if( res_c != res_asm ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: overflow %d != %d\n", i, res_c, res_asm ); \
                } \
            } \
        } \
    } \
    report( "pixel " #name " :" );
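/* The j*!align offset sweeps the second operand across 64 misalignments for
 * functions that must tolerate unaligned input, and stays at 0 for the
 * _aligned variants; the second loop reuses the max-difference buffers set
 * up above to probe intermediate overflow. */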

    TEST_PIXEL( sad, 0 );
    TEST_PIXEL( sad_aligned, 1 );
    TEST_PIXEL( ssd, 1 );
    TEST_PIXEL( satd, 0 );
    TEST_PIXEL( sa8d, 1 );

    ok = 1, used_asm = 0;
    if( pixel_asm.sa8d_satd[PIXEL_16x16] != pixel_ref.sa8d_satd[PIXEL_16x16] )
    {
        set_func_name( "sa8d_satd_%s", pixel_names[PIXEL_16x16] );
        used_asm = 1;
        for( int j = 0; j < 64; j++ )
        {
            uint32_t cost8_c = pixel_c.sa8d[PIXEL_16x16]( pbuf1, 16, pbuf2, 64 );
            uint32_t cost4_c = pixel_c.satd[PIXEL_16x16]( pbuf1, 16, pbuf2, 64 );
            uint64_t res_a = call_a64( pixel_asm.sa8d_satd[PIXEL_16x16], pbuf1, (intptr_t)16, pbuf2, (intptr_t)64 );
            uint32_t cost8_a = res_a;
            uint32_t cost4_a = res_a >> 32;
            if( cost8_a != cost8_c || cost4_a != cost4_c )
            {
                ok = 0;
                fprintf( stderr, "sa8d_satd [%d]: (%d,%d) != (%d,%d) [FAILED]\n", PIXEL_16x16,
                         cost8_c, cost4_c, cost8_a, cost4_a );
                break;
            }
        }
        for( int j = 0; j < 0x1000 && ok; j += 256 )
        {
            uint32_t cost8_c = pixel_c.sa8d[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
            uint32_t cost4_c = pixel_c.satd[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
            uint64_t res_a = pixel_asm.sa8d_satd[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
            uint32_t cost8_a = res_a;
            uint32_t cost4_a = res_a >> 32;
            if( cost8_a != cost8_c || cost4_a != cost4_c )
            {
                ok = 0;
                fprintf( stderr, "sa8d_satd [%d]: overflow (%d,%d) != (%d,%d) [FAILED]\n", PIXEL_16x16,
                         cost8_c, cost4_c, cost8_a, cost4_a );
            }
        }
    }
    report( "pixel sa8d_satd :" );

#define TEST_PIXEL_X( N ) \
    ok = 1; used_asm = 0; \
    for( int i = 0; i < 7; i++ ) \
    { \
        ALIGNED_16( int res_c[4] ) = {0}; \
        ALIGNED_16( int res_asm[4] ) = {0}; \
        if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
        { \
            set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
            used_asm = 1; \
            for( int j = 0; j < 64; j++ ) \
            { \
                pixel *pix2 = pbuf2+j; \
                res_c[0] = pixel_c.sad[i]( pbuf1, 16, pix2, 64 ); \
                res_c[1] = pixel_c.sad[i]( pbuf1, 16, pix2+6, 64 ); \
                res_c[2] = pixel_c.sad[i]( pbuf1, 16, pix2+1, 64 ); \
                if( N == 4 ) \
                { \
                    res_c[3] = pixel_c.sad[i]( pbuf1, 16, pix2+10, 64 ); \
                    call_a( pixel_asm.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, (intptr_t)64, res_asm ); \
                } \
                else \
                    call_a( pixel_asm.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, (intptr_t)64, res_asm ); \
                if( memcmp(res_c, res_asm, N*sizeof(int)) ) \
                { \
                    ok = 0; \
                    fprintf( stderr, "sad_x"#N"[%d]: %d,%d,%d,%d != %d,%d,%d,%d [FAILED]\n", \
                             i, res_c[0], res_c[1], res_c[2], res_c[3], \
                             res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
                } \
                if( N == 4 ) \
                    call_c2( pixel_c.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, (intptr_t)64, res_asm ); \
                else \
                    call_c2( pixel_c.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, (intptr_t)64, res_asm ); \
            } \
        } \
    } \
    report( "pixel sad_x"#N" :" );

    TEST_PIXEL_X(3);
    TEST_PIXEL_X(4);

#define TEST_PIXEL_VAR( i ) \
    if( pixel_asm.var[i] != pixel_ref.var[i] ) \
    { \
        set_func_name( "%s_%s", "var", pixel_names[i] ); \
        used_asm = 1; \
        /* abi-check wrapper can't return uint64_t, so separate it from return value check */ \
        call_c1( pixel_c.var[i], pbuf1, 16 ); \
        call_a1( pixel_asm.var[i], pbuf1, (intptr_t)16 ); \
        uint64_t res_c = pixel_c.var[i]( pbuf1, 16 ); \
        uint64_t res_asm = pixel_asm.var[i]( pbuf1, 16 ); \
        if( res_c != res_asm ) \
        { \
            ok = 0; \
            fprintf( stderr, "var[%d]: %d %d != %d %d [FAILED]\n", i, (int)res_c, (int)(res_c>>32), (int)res_asm, (int)(res_asm>>32) ); \
        } \
        call_c2( pixel_c.var[i], pbuf1, (intptr_t)16 ); \
        call_a2( pixel_asm.var[i], pbuf1, (intptr_t)16 ); \
    }

    ok = 1; used_asm = 0;
    TEST_PIXEL_VAR( PIXEL_16x16 );
    TEST_PIXEL_VAR( PIXEL_8x16 );
    TEST_PIXEL_VAR( PIXEL_8x8 );
    report( "pixel var :" );

#define TEST_PIXEL_VAR2( i ) \
    if( pixel_asm.var2[i] != pixel_ref.var2[i] ) \
    { \
        int res_c, res_asm, ssd_c, ssd_asm; \
        set_func_name( "%s_%s", "var2", pixel_names[i] ); \
        used_asm = 1; \
        res_c = call_c( pixel_c.var2[i], pbuf1, (intptr_t)16, pbuf2, (intptr_t)16, &ssd_c ); \
        res_asm = call_a( pixel_asm.var2[i], pbuf1, (intptr_t)16, pbuf2, (intptr_t)16, &ssd_asm ); \
        if( res_c != res_asm || ssd_c != ssd_asm ) \
        { \
            ok = 0; \
            fprintf( stderr, "var2[%d]: %d != %d or %d != %d [FAILED]\n", i, res_c, res_asm, ssd_c, ssd_asm ); \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_PIXEL_VAR2( PIXEL_8x16 );
    TEST_PIXEL_VAR2( PIXEL_8x8 );
    report( "pixel var2 :" );

    ok = 1; used_asm = 0;
    for( int i = 0; i < 4; i++ )
        if( pixel_asm.hadamard_ac[i] != pixel_ref.hadamard_ac[i] )
        {
            set_func_name( "hadamard_ac_%s", pixel_names[i] );
            used_asm = 1;
            for( int j = 0; j < 32; j++ )
            {
                pixel *pix = (j&16 ? pbuf1 : pbuf3) + (j&15)*256;
                call_c1( pixel_c.hadamard_ac[i], pbuf1, (intptr_t)16 );
                call_a1( pixel_asm.hadamard_ac[i], pbuf1, (intptr_t)16 );
                uint64_t rc = pixel_c.hadamard_ac[i]( pix, 16 );
                uint64_t ra = pixel_asm.hadamard_ac[i]( pix, 16 );
                if( rc != ra )
                {
                    ok = 0;
                    fprintf( stderr, "hadamard_ac[%d]: %d,%d != %d,%d\n", i, (int)rc, (int)(rc>>32), (int)ra, (int)(ra>>32) );
                    break;
                }
            }
            call_c2( pixel_c.hadamard_ac[i], pbuf1, (intptr_t)16 );
            call_a2( pixel_asm.hadamard_ac[i], pbuf1, (intptr_t)16 );
        }
    report( "pixel hadamard_ac :" );

    // maximize sum
    for( int i = 0; i < 32; i++ )
        for( int j = 0; j < 16; j++ )
            pbuf4[16*i+j] = -((i+j)&1) & PIXEL_MAX;
    ok = 1; used_asm = 0;
    if( pixel_asm.vsad != pixel_ref.vsad )
    {
        for( int h = 2; h <= 32; h += 2 )
        {
            int res_c, res_asm;
            set_func_name( "vsad" );
            used_asm = 1;
            for( int j = 0; j < 2 && ok; j++ )
            {
                pixel *p = j ? pbuf4 : pbuf1;
                res_c = call_c( pixel_c.vsad, p, (intptr_t)16, h );
                res_asm = call_a( pixel_asm.vsad, p, (intptr_t)16, h );
                if( res_c != res_asm )
                {
                    ok = 0;
                    fprintf( stderr, "vsad: height=%d, %d != %d\n", h, res_c, res_asm );
                    break;
                }
            }
        }
    }
    report( "pixel vsad :" );

    ok = 1; used_asm = 0;
    if( pixel_asm.asd8 != pixel_ref.asd8 )
    {
        set_func_name( "asd8" );
        used_asm = 1;
        int res_c = call_c( pixel_c.asd8, pbuf1, (intptr_t)8, pbuf2, (intptr_t)8, 16 );
        int res_a = call_a( pixel_asm.asd8, pbuf1, (intptr_t)8, pbuf2, (intptr_t)8, 16 );
        if( res_c != res_a )
        {
            ok = 0;
            fprintf( stderr, "asd: %d != %d\n", res_c, res_a );
        }
    }
    report( "pixel asd :" );

#define TEST_INTRA_X3( name, i8x8, ... ) \
    if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
    { \
        ALIGNED_16( int res_c[3] ); \
        ALIGNED_16( int res_asm[3] ); \
        set_func_name( #name ); \
        used_asm = 1; \
        call_c( pixel_c.name, pbuf1+48, i8x8 ? edge : pbuf3+48, res_c ); \
        call_a( pixel_asm.name, pbuf1+48, i8x8 ? edge : pbuf3+48, res_asm ); \
        if( memcmp(res_c, res_asm, sizeof(res_c)) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name": %d,%d,%d != %d,%d,%d [FAILED]\n", \
                     res_c[0], res_c[1], res_c[2], \
                     res_asm[0], res_asm[1], res_asm[2] ); \
        } \
    }

#define TEST_INTRA_X9( name, cmp ) \
    if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        ALIGNED_ARRAY_64( uint16_t, bitcosts,[17] ); \
        for( int i=0; i<17; i++ ) \
            bitcosts[i] = 9*(i!=8); \
        memcpy( pbuf3, pbuf2, 20*FDEC_STRIDE*sizeof(pixel) ); \
        memcpy( pbuf4, pbuf2, 20*FDEC_STRIDE*sizeof(pixel) ); \
        for( int i=0; i<32; i++ ) \
        { \
            pixel *fenc = pbuf1+48+i*12; \
            pixel *fdec1 = pbuf3+48+i*12; \
            pixel *fdec2 = pbuf4+48+i*12; \
            int pred_mode = i%9; \
            int res_c = INT_MAX; \
            for( int j=0; j<9; j++ ) \
            { \
                predict_4x4[j]( fdec1 ); \
                int cost = pixel_c.cmp[PIXEL_4x4]( fenc, FENC_STRIDE, fdec1, FDEC_STRIDE ) + 9*(j!=pred_mode); \
                if( cost < (uint16_t)res_c ) \
                    res_c = cost + (j<<16); \
            } \
            predict_4x4[res_c>>16]( fdec1 ); \
            int res_a = call_a( pixel_asm.name, fenc, fdec2, bitcosts+8-pred_mode ); \
            if( res_c != res_a ) \
            { \
                ok = 0; \
                fprintf( stderr, #name": %d,%d != %d,%d [FAILED]\n", res_c>>16, res_c&0xffff, res_a>>16, res_a&0xffff ); \
                break; \
            } \
            if( memcmp(fdec1, fdec2, 4*FDEC_STRIDE*sizeof(pixel)) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name" [FAILED]\n" ); \
                for( int j=0; j<16; j++ ) \
                    fprintf( stderr, "%02x ", fdec1[(j&3)+(j>>2)*FDEC_STRIDE] ); \
                fprintf( stderr, "\n" ); \
                for( int j=0; j<16; j++ ) \
                    fprintf( stderr, "%02x ", fdec2[(j&3)+(j>>2)*FDEC_STRIDE] ); \
                fprintf( stderr, "\n" ); \
                break; \
            } \
        } \
    }
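/* The x9 predictors return a packed result: the winning prediction mode in
 * the upper bits and its cost (including the 9-bit mode-signalling penalty
 * from bitcosts[]) in the low 16, which is why the C reference accumulates
 * res_c = cost + (j<<16) and the failure message masks with 0xffff. */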

#define TEST_INTRA8_X9( name, cmp ) \
    if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        ALIGNED_ARRAY_64( uint16_t, bitcosts,[17] ); \
        ALIGNED_ARRAY_16( uint16_t, satds_c,[16] ); \
        ALIGNED_ARRAY_16( uint16_t, satds_a,[16] ); \
        memset( satds_c, 0, 16 * sizeof(*satds_c) ); \
        memset( satds_a, 0, 16 * sizeof(*satds_a) ); \
        for( int i=0; i<17; i++ ) \
            bitcosts[i] = 9*(i!=8); \
        for( int i=0; i<32; i++ ) \
        { \
            pixel *fenc = pbuf1+48+i*12; \
            pixel *fdec1 = pbuf3+48+i*12; \
            pixel *fdec2 = pbuf4+48+i*12; \
            int pred_mode = i%9; \
            int res_c = INT_MAX; \
            predict_8x8_filter( fdec1, edge, ALL_NEIGHBORS, ALL_NEIGHBORS ); \
            for( int j=0; j<9; j++ ) \
            { \
                predict_8x8[j]( fdec1, edge ); \
                satds_c[j] = pixel_c.cmp[PIXEL_8x8]( fenc, FENC_STRIDE, fdec1, FDEC_STRIDE ) + 9*(j!=pred_mode); \
                if( satds_c[j] < (uint16_t)res_c ) \
                    res_c = satds_c[j] + (j<<16); \
            } \
            predict_8x8[res_c>>16]( fdec1, edge ); \
            int res_a = call_a( pixel_asm.name, fenc, fdec2, edge, bitcosts+8-pred_mode, satds_a ); \
            if( res_c != res_a || memcmp(satds_c, satds_a, 16 * sizeof(*satds_c)) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name": %d,%d != %d,%d [FAILED]\n", res_c>>16, res_c&0xffff, res_a>>16, res_a&0xffff ); \
                for( int j = 0; j < 9; j++ ) \
                    fprintf( stderr, "%5d ", satds_c[j]); \
                fprintf( stderr, "\n" ); \
                for( int j = 0; j < 9; j++ ) \
                    fprintf( stderr, "%5d ", satds_a[j]); \
                fprintf( stderr, "\n" ); \
                break; \
            } \
            for( int j=0; j<8; j++ ) \
                if( memcmp(fdec1+j*FDEC_STRIDE, fdec2+j*FDEC_STRIDE, 8*sizeof(pixel)) ) \
                    ok = 0; \
            if( !ok ) \
            { \
                fprintf( stderr, #name" [FAILED]\n" ); \
                for( int j=0; j<8; j++ ) \
                { \
                    for( int k=0; k<8; k++ ) \
                        fprintf( stderr, "%02x ", fdec1[k+j*FDEC_STRIDE] ); \
                    fprintf( stderr, "\n" ); \
                } \
                fprintf( stderr, "\n" ); \
                for( int j=0; j<8; j++ ) \
                { \
                    for( int k=0; k<8; k++ ) \
                        fprintf( stderr, "%02x ", fdec2[k+j*FDEC_STRIDE] ); \
                    fprintf( stderr, "\n" ); \
                } \
                fprintf( stderr, "\n" ); \
                break; \
            } \
        } \
    }

    memcpy( pbuf3, pbuf2, 20*FDEC_STRIDE*sizeof(pixel) );
    ok = 1; used_asm = 0;
    TEST_INTRA_X3( intra_satd_x3_16x16, 0 );
    TEST_INTRA_X3( intra_satd_x3_8x16c, 0 );
    TEST_INTRA_X3( intra_satd_x3_8x8c, 0 );
    TEST_INTRA_X3( intra_sa8d_x3_8x8, 1, edge );
    TEST_INTRA_X3( intra_satd_x3_4x4, 0 );
    report( "intra satd_x3 :" );
    ok = 1; used_asm = 0;
    TEST_INTRA_X3( intra_sad_x3_16x16, 0 );
    TEST_INTRA_X3( intra_sad_x3_8x16c, 0 );
    TEST_INTRA_X3( intra_sad_x3_8x8c, 0 );
    TEST_INTRA_X3( intra_sad_x3_8x8, 1, edge );
    TEST_INTRA_X3( intra_sad_x3_4x4, 0 );
    report( "intra sad_x3 :" );
    ok = 1; used_asm = 0;
    TEST_INTRA_X9( intra_satd_x9_4x4, satd );
    TEST_INTRA8_X9( intra_sa8d_x9_8x8, sa8d );
    report( "intra satd_x9 :" );
    ok = 1; used_asm = 0;
    TEST_INTRA_X9( intra_sad_x9_4x4, sad );
    TEST_INTRA8_X9( intra_sad_x9_8x8, sad );
    report( "intra sad_x9 :" );

    ok = 1; used_asm = 0;
    if( pixel_asm.ssd_nv12_core != pixel_ref.ssd_nv12_core )
    {
        used_asm = 1;
        set_func_name( "ssd_nv12" );
        uint64_t res_u_c, res_v_c, res_u_a, res_v_a;
        for( int w = 8; w <= 360; w += 8 )
        {
            pixel_c.ssd_nv12_core( pbuf1, 368, pbuf2, 368, w, 8, &res_u_c, &res_v_c );
            pixel_asm.ssd_nv12_core( pbuf1, 368, pbuf2, 368, w, 8, &res_u_a, &res_v_a );
            if( res_u_c != res_u_a || res_v_c != res_v_a )
            {
                ok = 0;
                fprintf( stderr, "ssd_nv12: %"PRIu64",%"PRIu64" != %"PRIu64",%"PRIu64"\n",
                         res_u_c, res_v_c, res_u_a, res_v_a );
            }
        }
        call_c( pixel_c.ssd_nv12_core, pbuf1, (intptr_t)368, pbuf2, (intptr_t)368, 360, 8, &res_u_c, &res_v_c );
        call_a( pixel_asm.ssd_nv12_core, pbuf1, (intptr_t)368, pbuf2, (intptr_t)368, 360, 8, &res_u_a, &res_v_a );
    }
    report( "ssd_nv12 :" );

    if( pixel_asm.ssim_4x4x2_core != pixel_ref.ssim_4x4x2_core ||
        pixel_asm.ssim_end4 != pixel_ref.ssim_end4 )
    {
        int cnt;
        float res_c, res_a;
        ALIGNED_16( int sums[5][4] ) = {{0}};
        used_asm = ok = 1;
        x264_emms();
        res_c = x264_pixel_ssim_wxh( &pixel_c, pbuf1+2, 32, pbuf2+2, 32, 32, 28, pbuf3, &cnt );
        res_a = x264_pixel_ssim_wxh( &pixel_asm, pbuf1+2, 32, pbuf2+2, 32, 32, 28, pbuf3, &cnt );
        if( fabs( res_c - res_a ) > 1e-6 )
        {
            ok = 0;
            fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
        }
        set_func_name( "ssim_core" );
        call_c( pixel_c.ssim_4x4x2_core, pbuf1+2, (intptr_t)32, pbuf2+2, (intptr_t)32, sums );
        call_a( pixel_asm.ssim_4x4x2_core, pbuf1+2, (intptr_t)32, pbuf2+2, (intptr_t)32, sums );
        set_func_name( "ssim_end" );
        call_c2( pixel_c.ssim_end4, sums, sums, 4 );
        call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
        /* check incorrect assumptions that 32-bit ints are zero-extended to 64-bit */
        call_c1( pixel_c.ssim_end4, sums, sums, 3 );
        call_a1( pixel_asm.ssim_end4, sums, sums, 3 );
        report( "ssim :" );
    }

    ok = 1; used_asm = 0;
    for( int i = 0; i < 32; i++ )
        cost_mv[i] = i*10;
    for( int i = 0; i < 100 && ok; i++ )
        if( pixel_asm.ads[i&3] != pixel_ref.ads[i&3] )
        {
            ALIGNED_16( uint16_t sums[72] );
            ALIGNED_16( int dc[4] );
            ALIGNED_16( int16_t mvs_a[48] );
            ALIGNED_16( int16_t mvs_c[48] );
            int mvn_a, mvn_c;
            int thresh = rand() & 0x3fff;
            set_func_name( "esa_ads" );
            for( int j = 0; j < 72; j++ )
                sums[j] = rand() & 0x3fff;
            for( int j = 0; j < 4; j++ )
                dc[j] = rand() & 0x3fff;
            used_asm = 1;
            mvn_c = call_c( pixel_c.ads[i&3], dc, sums, 32, cost_mv, mvs_c, 28, thresh );
            mvn_a = call_a( pixel_asm.ads[i&3], dc, sums, 32, cost_mv, mvs_a, 28, thresh );
            if( mvn_c != mvn_a || memcmp( mvs_c, mvs_a, mvn_c*sizeof(*mvs_c) ) )
            {
                ok = 0;
                printf( "c%d: ", i&3 );
                for( int j = 0; j < mvn_c; j++ )
                    printf( "%d ", mvs_c[j] );
                printf( "\na%d: ", i&3 );
                for( int j = 0; j < mvn_a; j++ )
                    printf( "%d ", mvs_a[j] );
                printf( "\n\n" );
            }
        }
    report( "esa ads:" );

    return ret;
}

static int check_dct( int cpu_ref, int cpu_new )
{
    x264_dct_function_t dct_c;
    x264_dct_function_t dct_ref;
    x264_dct_function_t dct_asm;
    x264_quant_function_t qf;
    int ret = 0, ok, used_asm, interlace = 0;
    ALIGNED_ARRAY_N( dctcoef, dct1, [16],[16] );
    ALIGNED_ARRAY_N( dctcoef, dct2, [16],[16] );
    ALIGNED_ARRAY_N( dctcoef, dct4, [16],[16] );
    ALIGNED_ARRAY_N( dctcoef, dct8, [4],[64] );
    ALIGNED_16( dctcoef dctdc[2][8] );
    x264_t h_buf;
    x264_t *h = &h_buf;

    x264_dct_init( 0, &dct_c );
    x264_dct_init( cpu_ref, &dct_ref);
    x264_dct_init( cpu_new, &dct_asm );

    memset( h, 0, sizeof(*h) );
    x264_param_default( &h->param );
    h->sps->i_chroma_format_idc = 1;
    h->chroma_qp_table = i_chroma_qp_table + 12;
    h->param.analyse.i_luma_deadzone[0] = 0;
    h->param.analyse.i_luma_deadzone[1] = 0;
    h->param.analyse.b_transform_8x8 = 1;
    for( int i = 0; i < 6; i++ )
        h->pps->scaling_list[i] = x264_cqm_flat16;
    x264_cqm_init( h );
    x264_quant_init( h, 0, &qf );

    /* overflow test cases */
    for( int i = 0; i < 5; i++ )
    {
        pixel *enc = &pbuf3[16*i*FENC_STRIDE];
        pixel *dec = &pbuf4[16*i*FDEC_STRIDE];

        for( int j = 0; j < 16; j++ )
        {
            int cond_a = (i < 2) ? 1 : ((j&3) == 0 || (j&3) == (i-1));
            int cond_b = (i == 0) ? 1 : !cond_a;
            enc[0] = enc[1] = enc[4] = enc[5] = enc[8] = enc[9] = enc[12] = enc[13] = cond_a ? PIXEL_MAX : 0;
            enc[2] = enc[3] = enc[6] = enc[7] = enc[10] = enc[11] = enc[14] = enc[15] = cond_b ? PIXEL_MAX : 0;

            for( int k = 0; k < 4; k++ )
                dec[k] = PIXEL_MAX - enc[k];

            enc += FENC_STRIDE;
            dec += FDEC_STRIDE;
        }
    }
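    /* Each of the 5 blocks pairs an extreme source (runs of PIXEL_MAX and 0
     * in varying column patterns) with an inverted reconstruction, so the
     * residual reaches the largest magnitudes the 4x4/8x8 transforms can
     * see; a too-narrow intermediate register in the asm would clip or wrap
     * here and trip the "(overflow)" comparison below. */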

#define TEST_DCT( name, t1, t2, size ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        pixel *enc = pbuf3; \
        pixel *dec = pbuf4; \
        for( int j = 0; j < 5; j++) \
        { \
            call_c( dct_c.name, t1, &pbuf1[j*64], &pbuf2[j*64] ); \
            call_a( dct_asm.name, t2, &pbuf1[j*64], &pbuf2[j*64] ); \
            if( memcmp( t1, t2, size*sizeof(dctcoef) ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name " [FAILED]\n" ); \
                for( int k = 0; k < size; k++ )\
                    printf( "%d ", ((dctcoef*)t1)[k] );\
                printf("\n");\
                for( int k = 0; k < size; k++ )\
                    printf( "%d ", ((dctcoef*)t2)[k] );\
                printf("\n");\
                break; \
            } \
            call_c( dct_c.name, t1, enc, dec ); \
            call_a( dct_asm.name, t2, enc, dec ); \
            if( memcmp( t1, t2, size*sizeof(dctcoef) ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name " [FAILED] (overflow)\n" ); \
                break; \
            } \
            enc += 16*FENC_STRIDE; \
            dec += 16*FDEC_STRIDE; \
        } \
    }
    ok = 1; used_asm = 0;
    TEST_DCT( sub4x4_dct, dct1[0], dct2[0], 16 );
    TEST_DCT( sub8x8_dct, dct1, dct2, 16*4 );
    TEST_DCT( sub8x8_dct_dc, dctdc[0], dctdc[1], 4 );
    TEST_DCT( sub8x16_dct_dc, dctdc[0], dctdc[1], 8 );
    TEST_DCT( sub16x16_dct, dct1, dct2, 16*16 );
    report( "sub_dct4 :" );

    ok = 1; used_asm = 0;
    TEST_DCT( sub8x8_dct8, (void*)dct1[0], (void*)dct2[0], 64 );
    TEST_DCT( sub16x16_dct8, (void*)dct1, (void*)dct2, 64*4 );
    report( "sub_dct8 :" );
#undef TEST_DCT

    // fdct and idct are denormalized by different factors, so quant/dequant
    // is needed to force the coefs into the right range.
    dct_c.sub16x16_dct( dct4, pbuf1, pbuf2 );
    dct_c.sub16x16_dct8( dct8, pbuf1, pbuf2 );
    for( int i = 0; i < 16; i++ )
    {
        qf.quant_4x4( dct4[i], h->quant4_mf[CQM_4IY][20], h->quant4_bias[CQM_4IY][20] );
        qf.dequant_4x4( dct4[i], h->dequant4_mf[CQM_4IY], 20 );
    }
    for( int i = 0; i < 4; i++ )
    {
        qf.quant_8x8( dct8[i], h->quant8_mf[CQM_8IY][20], h->quant8_bias[CQM_8IY][20] );
        qf.dequant_8x8( dct8[i], h->dequant8_mf[CQM_8IY], 20 );
    }
    x264_cqm_delete( h );

#define TEST_IDCT( name, src ) \
    if( dct_asm.name != dct_ref.name ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        memcpy( pbuf3, pbuf1, 32*32 * sizeof(pixel) ); \
        memcpy( pbuf4, pbuf1, 32*32 * sizeof(pixel) ); \
        memcpy( dct1, src, 256 * sizeof(dctcoef) ); \
        memcpy( dct2, src, 256 * sizeof(dctcoef) ); \
        call_c1( dct_c.name, pbuf3, (void*)dct1 ); \
        call_a1( dct_asm.name, pbuf4, (void*)dct2 ); \
        if( memcmp( pbuf3, pbuf4, 32*32 * sizeof(pixel) ) ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( dct_c.name, pbuf3, (void*)dct1 ); \
        call_a2( dct_asm.name, pbuf4, (void*)dct2 ); \
    }
    ok = 1; used_asm = 0;
    TEST_IDCT( add4x4_idct, dct4 );
    TEST_IDCT( add8x8_idct, dct4 );
    TEST_IDCT( add8x8_idct_dc, dct4 );
    TEST_IDCT( add16x16_idct, dct4 );
    TEST_IDCT( add16x16_idct_dc, dct4 );
    report( "add_idct4 :" );

    ok = 1; used_asm = 0;
    TEST_IDCT( add8x8_idct8, dct8 );
    TEST_IDCT( add16x16_idct8, dct8 );
    report( "add_idct8 :" );
#undef TEST_IDCT

#define TEST_DCTDC( name )\
    ok = 1; used_asm = 0;\
    if( dct_asm.name != dct_ref.name )\
    {\
        set_func_name( #name );\
        used_asm = 1;\
        uint16_t *p = (uint16_t*)buf1;\
        for( int i = 0; i < 16 && ok; i++ )\
        {\
            for( int j = 0; j < 16; j++ )\
                dct1[0][j] = !i ? (j^j>>1^j>>2^j>>3)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max dc */\
                           : i<8 ? (*p++)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max elements */\
                           : ((*p++)&0x1fff)-0x1000; /* general case */\
            memcpy( dct2, dct1, 16 * sizeof(dctcoef) );\
            call_c1( dct_c.name, dct1[0] );\
            call_a1( dct_asm.name, dct2[0] );\
            if( memcmp( dct1, dct2, 16 * sizeof(dctcoef) ) )\
                ok = 0;\
        }\
        call_c2( dct_c.name, dct1[0] );\
        call_a2( dct_asm.name, dct2[0] );\
    }\
    report( #name " :" );

    TEST_DCTDC( dct4x4dc );
    TEST_DCTDC( idct4x4dc );
#undef TEST_DCTDC

#define TEST_DCTDC_CHROMA( name )\
    ok = 1; used_asm = 0;\
    if( dct_asm.name != dct_ref.name )\
    {\
        set_func_name( #name );\
        used_asm = 1;\
        uint16_t *p = (uint16_t*)buf1;\
        for( int i = 0; i < 16 && ok; i++ )\
        {\
            for( int j = 0; j < 8; j++ )\
                dct1[j][0] = !i ? (j^j>>1^j>>2)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max dc */\
                           : i<8 ? (*p++)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max elements */\
                           : ((*p++)&0x1fff)-0x1000; /* general case */\
            memcpy( dct2, dct1, 8*16 * sizeof(dctcoef) );\
            call_c1( dct_c.name, dctdc[0], dct1 );\
            call_a1( dct_asm.name, dctdc[1], dct2 );\
            if( memcmp( dctdc[0], dctdc[1], 8 * sizeof(dctcoef) ) || memcmp( dct1, dct2, 8*16 * sizeof(dctcoef) ) )\
            {\
                ok = 0;\
                fprintf( stderr, #name " [FAILED]\n" ); \
            }\
        }\
        call_c2( dct_c.name, dctdc[0], dct1 );\
        call_a2( dct_asm.name, dctdc[1], dct2 );\
    }\
    report( #name " :" );

    TEST_DCTDC_CHROMA( dct2x4dc );
#undef TEST_DCTDC_CHROMA

    x264_zigzag_function_t zigzag_c[2];
    x264_zigzag_function_t zigzag_ref[2];
    x264_zigzag_function_t zigzag_asm[2];

    ALIGNED_16( dctcoef level1[64] );
    ALIGNED_16( dctcoef level2[64] );

#define TEST_ZIGZAG_SCAN( name, t1, t2, dct, size ) \
    if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
    { \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        for( int i = 0; i < size*size; i++ ) \
            dct[i] = i; \
        call_c( zigzag_c[interlace].name, t1, dct ); \
        call_a( zigzag_asm[interlace].name, t2, dct ); \
        if( memcmp( t1, t2, size*size*sizeof(dctcoef) ) ) \
        { \
            ok = 0; \
            for( int i = 0; i < 2; i++ ) \
            { \
                dctcoef *d = (dctcoef*)(i ? t2 : t1); \
                for( int j = 0; j < size; j++ ) \
                { \
                    for( int k = 0; k < size; k++ ) \
                        fprintf( stderr, "%2d ", d[k+j*8] ); \
                    fprintf( stderr, "\n" ); \
                } \
                fprintf( stderr, "\n" ); \
            } \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
    }

#define TEST_ZIGZAG_SUB( name, t1, t2, size ) \
    if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
    { \
        int nz_a, nz_c; \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        memcpy( pbuf3, pbuf1, 16*FDEC_STRIDE * sizeof(pixel) ); \
        memcpy( pbuf4, pbuf1, 16*FDEC_STRIDE * sizeof(pixel) ); \
        nz_c = call_c1( zigzag_c[interlace].name, t1, pbuf2, pbuf3 ); \
        nz_a = call_a1( zigzag_asm[interlace].name, t2, pbuf2, pbuf4 ); \
        if( memcmp( t1, t2, size*sizeof(dctcoef) ) || memcmp( pbuf3, pbuf4, 16*FDEC_STRIDE*sizeof(pixel) ) || nz_c != nz_a ) \
        { \
            ok = 0; \
            fprintf( stderr, #name " [FAILED]\n" ); \
        } \
        call_c2( zigzag_c[interlace].name, t1, pbuf2, pbuf3 ); \
        call_a2( zigzag_asm[interlace].name, t2, pbuf2, pbuf4 ); \
    }

#define TEST_ZIGZAG_SUBAC( name, t1, t2 ) \
    if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
    { \
        int nz_a, nz_c; \
        dctcoef dc_a, dc_c; \
        set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
        used_asm = 1; \
        for( int i = 0; i < 2; i++ ) \
        { \
            memcpy( pbuf3, pbuf2, 16*FDEC_STRIDE * sizeof(pixel) ); \
            memcpy( pbuf4, pbuf2, 16*FDEC_STRIDE * sizeof(pixel) ); \
            for( int j = 0; j < 4; j++ ) \
            { \
                memcpy( pbuf3 + j*FDEC_STRIDE, (i?pbuf1:pbuf2) + j*FENC_STRIDE, 4 * sizeof(pixel) ); \
                memcpy( pbuf4 + j*FDEC_STRIDE, (i?pbuf1:pbuf2) + j*FENC_STRIDE, 4 * sizeof(pixel) ); \
            } \
            nz_c = call_c1( zigzag_c[interlace].name, t1, pbuf2, pbuf3, &dc_c ); \
            nz_a = call_a1( zigzag_asm[interlace].name, t2, pbuf2, pbuf4, &dc_a ); \
            if( memcmp( t1+1, t2+1, 15*sizeof(dctcoef) ) || memcmp( pbuf3, pbuf4, 16*FDEC_STRIDE * sizeof(pixel) ) || nz_c != nz_a || dc_c != dc_a ) \
            { \
                ok = 0; \
                fprintf( stderr, #name " [FAILED]\n" ); \
                break; \
            } \
        } \
        call_c2( zigzag_c[interlace].name, t1, pbuf2, pbuf3, &dc_c ); \
        call_a2( zigzag_asm[interlace].name, t2, pbuf2, pbuf4, &dc_a ); \
    }

#define TEST_INTERLEAVE( name, t1, t2, dct, size ) \
    if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
    { \
        for( int j = 0; j < 100; j++ ) \
        { \
            set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
            used_asm = 1; \
            memcpy(dct, buf1, size*sizeof(dctcoef)); \
            for( int i = 0; i < size; i++ ) \
                dct[i] = rand()&0x1F ? 0 : dct[i]; \
            memcpy(buf3, buf4, 10); \
            call_c( zigzag_c[interlace].name, t1, dct, buf3 ); \
            call_a( zigzag_asm[interlace].name, t2, dct, buf4 ); \
            if( memcmp( t1, t2, size*sizeof(dctcoef) ) || memcmp( buf3, buf4, 10 ) ) \
            { \
                ok = 0; \
                printf( "%d: %d %d %d %d\n%d %d %d %d\n\n", memcmp( t1, t2, size*sizeof(dctcoef) ), \
                        buf3[0], buf3[1], buf3[8], buf3[9], buf4[0], buf4[1], buf4[8], buf4[9] ); \
                break; \
            } \
        } \
    }
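/* rand()&0x1F zeroes roughly 31 of every 32 coefficients, so the interleave
 * functions are exercised on the sparse blocks they see in practice; buf3
 * and buf4 hold the small nnz output array, and comparing a few extra bytes
 * also catches stray writes just past it. */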

    x264_zigzag_init( 0, &zigzag_c[0], &zigzag_c[1] );
    x264_zigzag_init( cpu_ref, &zigzag_ref[0], &zigzag_ref[1] );
    x264_zigzag_init( cpu_new, &zigzag_asm[0], &zigzag_asm[1] );

    ok = 1; used_asm = 0;
    TEST_INTERLEAVE( interleave_8x8_cavlc, level1, level2, dct8[0], 64 );
    report( "zigzag_interleave :" );

    for( interlace = 0; interlace <= 1; interlace++ )
    {
        ok = 1; used_asm = 0;
        TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, dct8[0], 8 );
        TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 4 );
        TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
        TEST_ZIGZAG_SUB( sub_8x8, level1, level2, 64 );
        TEST_ZIGZAG_SUBAC( sub_4x4ac, level1, level2 );
        report( interlace ? "zigzag_field :" : "zigzag_frame :" );
    }
#undef TEST_ZIGZAG_SCAN
#undef TEST_ZIGZAG_SUB

    return ret;
}

static int check_mc( int cpu_ref, int cpu_new )
{
    x264_mc_functions_t mc_c;
    x264_mc_functions_t mc_ref;
    x264_mc_functions_t mc_a;
    x264_pixel_function_t pixf;

    pixel *src = &(pbuf1)[2*64+2];
    pixel *src2[4] = { &(pbuf1)[3*64+2], &(pbuf1)[5*64+2],
                       &(pbuf1)[7*64+2], &(pbuf1)[9*64+2] };
    pixel *dst1 = pbuf3;
    pixel *dst2 = pbuf4;

    int ret = 0, ok, used_asm;

    x264_mc_init( 0, &mc_c, 0 );
    x264_mc_init( cpu_ref, &mc_ref, 0 );
    x264_mc_init( cpu_new, &mc_a, 0 );
    x264_pixel_init( 0, &pixf );

#define MC_TEST_LUMA( w, h ) \
    if( mc_a.mc_luma != mc_ref.mc_luma && !(w&(w-1)) && h<=16 ) \
    { \
        const x264_weight_t *weight = x264_weight_none; \
        set_func_name( "mc_luma_%dx%d", w, h ); \
        used_asm = 1; \
        for( int i = 0; i < 1024; i++ ) \
            pbuf3[i] = pbuf4[i] = 0xCD; \
        call_c( mc_c.mc_luma, dst1, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
        call_a( mc_a.mc_luma, dst2, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
        if( memcmp( pbuf3, pbuf4, 1024 * sizeof(pixel) ) ) \
        { \
            fprintf( stderr, "mc_luma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
            ok = 0; \
        } \
    } \
    if( mc_a.get_ref != mc_ref.get_ref ) \
    { \
        pixel *ref = dst2; \
        intptr_t ref_stride = 32; \
        int w_checked = ( ( sizeof(pixel) == 2 && (w == 12 || w == 20)) ? w-2 : w ); \
        const x264_weight_t *weight = x264_weight_none; \
        set_func_name( "get_ref_%dx%d", w_checked, h ); \
        used_asm = 1; \
        for( int i = 0; i < 1024; i++ ) \
            pbuf3[i] = pbuf4[i] = 0xCD; \
        call_c( mc_c.mc_luma, dst1, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
        ref = (pixel*)call_a( mc_a.get_ref, ref, &ref_stride, src2, (intptr_t)64, dx, dy, w, h, weight ); \
        for( int i = 0; i < h; i++ ) \
            if( memcmp( dst1+i*32, ref+i*ref_stride, w_checked * sizeof(pixel) ) ) \
            { \
                fprintf( stderr, "get_ref[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w_checked, h ); \
                ok = 0; \
                break; \
            } \
    }
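/* get_ref may return a pointer into its own interpolation buffer instead of
 * copying, so the check compares against mc_luma's output row by row using
 * whatever stride get_ref reports; w_checked trims widths 12/20 in high bit
 * depth, where the implementation is permitted to differ in the final
 * (unchecked) columns. */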

#define MC_TEST_CHROMA( w, h ) \
    if( mc_a.mc_chroma != mc_ref.mc_chroma ) \
    { \
        set_func_name( "mc_chroma_%dx%d", w, h ); \
        used_asm = 1; \
        for( int i = 0; i < 1024; i++ ) \
            pbuf3[i] = pbuf4[i] = 0xCD; \
        call_c( mc_c.mc_chroma, dst1, dst1+8, (intptr_t)16, src, (intptr_t)64, dx, dy, w, h ); \
        call_a( mc_a.mc_chroma, dst2, dst2+8, (intptr_t)16, src, (intptr_t)64, dx, dy, w, h ); \
        /* mc_chroma width=2 may write garbage to the right of dst. ignore that. */ \
        for( int j = 0; j < h; j++ ) \
            for( int i = w; i < 8; i++ ) \
            { \
                dst2[i+j*16+8] = dst1[i+j*16+8]; \
                dst2[i+j*16  ] = dst1[i+j*16  ]; \
            } \
        if( memcmp( pbuf3, pbuf4, 1024 * sizeof(pixel) ) ) \
        { \
            fprintf( stderr, "mc_chroma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
            ok = 0; \
        } \
    }
    ok = 1; used_asm = 0;
    for( int dy = -8; dy < 8; dy++ )
        for( int dx = -128; dx < 128; dx++ )
        {
            if( rand()&15 ) continue; // running all of them is too slow
            MC_TEST_LUMA( 20, 18 );
            MC_TEST_LUMA( 16, 16 );
            MC_TEST_LUMA( 16, 8 );
            MC_TEST_LUMA( 12, 10 );
            MC_TEST_LUMA( 8, 16 );
            MC_TEST_LUMA( 8, 8 );
            MC_TEST_LUMA( 8, 4 );
            MC_TEST_LUMA( 4, 8 );
            MC_TEST_LUMA( 4, 4 );
        }
    report( "mc luma :" );

    ok = 1; used_asm = 0;
    for( int dy = -1; dy < 9; dy++ )
        for( int dx = -128; dx < 128; dx++ )
        {
            if( rand()&15 ) continue;
            MC_TEST_CHROMA( 8, 8 );
            MC_TEST_CHROMA( 8, 4 );
            MC_TEST_CHROMA( 4, 8 );
            MC_TEST_CHROMA( 4, 4 );
            MC_TEST_CHROMA( 4, 2 );
            MC_TEST_CHROMA( 2, 4 );
            MC_TEST_CHROMA( 2, 2 );
        }
    report( "mc chroma :" );
#undef MC_TEST_LUMA
#undef MC_TEST_CHROMA

#define MC_TEST_AVG( name, weight ) \
{ \
    for( int i = 0; i < 12; i++ ) \
    { \
        memcpy( pbuf3, pbuf1+320, 320 * sizeof(pixel) ); \
        memcpy( pbuf4, pbuf1+320, 320 * sizeof(pixel) ); \
        if( mc_a.name[i] != mc_ref.name[i] ) \
        { \
            set_func_name( "%s_%s", #name, pixel_names[i] ); \
            used_asm = 1; \
            call_c1( mc_c.name[i], pbuf3, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
            call_a1( mc_a.name[i], pbuf4, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
            if( memcmp( pbuf3, pbuf4, 320 * sizeof(pixel) ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name "[%d]: [FAILED]\n", i ); \
            } \
            call_c2( mc_c.name[i], pbuf3, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
            call_a2( mc_a.name[i], pbuf4, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
        } \
    } \
}

    ok = 1, used_asm = 0;
    for( int w = -63; w <= 127 && ok; w++ )
        MC_TEST_AVG( avg, w );
    report( "mc wpredb :" );

#define MC_TEST_WEIGHT( name, weight, aligned ) \
    int align_off = (aligned ? 0 : rand()%16); \
    for( int i = 1; i <= 5; i++ ) \
    { \
        ALIGNED_16( pixel buffC[640] ); \
        ALIGNED_16( pixel buffA[640] ); \
        int j = X264_MAX( i*4, 2 ); \
        memset( buffC, 0, 640 * sizeof(pixel) ); \
        memset( buffA, 0, 640 * sizeof(pixel) ); \
        x264_t ha; \
        ha.mc = mc_a; \
        /* w12 is the same as w16 in some cases */ \
        if( i == 3 && mc_a.name[i] == mc_a.name[i+1] ) \
            continue; \
        if( mc_a.name[i] != mc_ref.name[i] ) \
        { \
            set_func_name( "%s_w%d", #name, j ); \
            used_asm = 1; \
            call_c1( mc_c.weight[i], buffC, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
            mc_a.weight_cache(&ha, &weight); \
            call_a1( weight.weightfn[i], buffA, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
            for( int k = 0; k < 16; k++ ) \
                if( memcmp( &buffC[k*32], &buffA[k*32], j * sizeof(pixel) ) ) \
                { \
                    ok = 0; \
                    fprintf( stderr, #name "[%d]: [FAILED] s:%d o:%d d:%d\n", i, s, o, d ); \
                    break; \
                } \
            /* omit unlikely high scales for benchmarking */ \
            if( (s << (8-d)) < 512 ) \
            { \
                call_c2( mc_c.weight[i], buffC, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
                call_a2( weight.weightfn[i], buffA, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
            } \
        } \
    }

    ok = 1; used_asm = 0;

    int align_cnt = 0;
    for( int s = 0; s <= 127 && ok; s++ )
    {
        for( int o = -128; o <= 127 && ok; o++ )
        {
            if( rand() & 2047 ) continue;
            for( int d = 0; d <= 7 && ok; d++ )
            {
                if( s == 1<<d )
                    continue;
                x264_weight_t weight = { .i_scale = s, .i_denom = d, .i_offset = o };
                MC_TEST_WEIGHT( weight, weight, (align_cnt++ % 4) );
            }
        }
    }
    report( "mc weight :" );

    ok = 1; used_asm = 0;
    for( int o = 0; o <= 127 && ok; o++ )
    {
        int s = 1, d = 0;
        if( rand() & 15 ) continue;
        x264_weight_t weight = { .i_scale = 1, .i_denom = 0, .i_offset = o };
        MC_TEST_WEIGHT( offsetadd, weight, (align_cnt++ % 4) );
    }
    report( "mc offsetadd :" );
    ok = 1; used_asm = 0;
    for( int o = -128; o < 0 && ok; o++ )
    {
        int s = 1, d = 0;
        if( rand() & 15 ) continue;
        x264_weight_t weight = { .i_scale = 1, .i_denom = 0, .i_offset = o };
        MC_TEST_WEIGHT( offsetsub, weight, (align_cnt++ % 4) );
    }
    report( "mc offsetsub :" );

    ok = 1; used_asm = 0;
    for( int height = 8; height <= 16; height += 8 )
    {
        if( mc_a.store_interleave_chroma != mc_ref.store_interleave_chroma )
        {
            set_func_name( "store_interleave_chroma" );
            used_asm = 1;
            memset( pbuf3, 0, 64*height );
            memset( pbuf4, 0, 64*height );
            call_c( mc_c.store_interleave_chroma, pbuf3, (intptr_t)64, pbuf1, pbuf1+16, height );
            call_a( mc_a.store_interleave_chroma, pbuf4, (intptr_t)64, pbuf1, pbuf1+16, height );
            if( memcmp( pbuf3, pbuf4, 64*height ) )
            {
                ok = 0;
                fprintf( stderr, "store_interleave_chroma FAILED: h=%d\n", height );
                break;
            }
        }
        if( mc_a.load_deinterleave_chroma_fenc != mc_ref.load_deinterleave_chroma_fenc )
        {
            set_func_name( "load_deinterleave_chroma_fenc" );
            used_asm = 1;
            call_c( mc_c.load_deinterleave_chroma_fenc, pbuf3, pbuf1, (intptr_t)64, height );
            call_a( mc_a.load_deinterleave_chroma_fenc, pbuf4, pbuf1, (intptr_t)64, height );
            if( memcmp( pbuf3, pbuf4, FENC_STRIDE*height ) )
            {
                ok = 0;
                fprintf( stderr, "load_deinterleave_chroma_fenc FAILED: h=%d\n", height );
                break;
            }
        }
        if( mc_a.load_deinterleave_chroma_fdec != mc_ref.load_deinterleave_chroma_fdec )
        {
            set_func_name( "load_deinterleave_chroma_fdec" );
            used_asm = 1;
            call_c( mc_c.load_deinterleave_chroma_fdec, pbuf3, pbuf1, (intptr_t)64, height );
            call_a( mc_a.load_deinterleave_chroma_fdec, pbuf4, pbuf1, (intptr_t)64, height );
            if( memcmp( pbuf3, pbuf4, FDEC_STRIDE*height ) )
            {
                ok = 0;
                fprintf( stderr, "load_deinterleave_chroma_fdec FAILED: h=%d\n", height );
                break;
            }
        }
    }
    report( "store_interleave :" );

    struct plane_spec {
        int w, h, src_stride;
    } plane_specs[] = { {2,2,2}, {8,6,8}, {20,31,24}, {32,8,40}, {256,10,272}, {504,7,505}, {528,6,528}, {256,10,-256}, {263,9,-264}, {1904,1,0} };
    ok = 1; used_asm = 0;
    if( mc_a.plane_copy != mc_ref.plane_copy )
    {
        set_func_name( "plane_copy" );
        used_asm = 1;
        for( int i = 0; i < sizeof(plane_specs)/sizeof(*plane_specs); i++ )
        {
            int w = plane_specs[i].w;
            int h = plane_specs[i].h;
            intptr_t src_stride = plane_specs[i].src_stride;
            intptr_t dst_stride = (w + 127) & ~63;
            assert( dst_stride * h <= 0x1000 );
            pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
            memset( pbuf3, 0, 0x1000*sizeof(pixel) );
            memset( pbuf4, 0, 0x1000*sizeof(pixel) );
            call_c( mc_c.plane_copy, pbuf3, dst_stride, src1, src_stride, w, h );
            call_a( mc_a.plane_copy, pbuf4, dst_stride, src1, src_stride, w, h );
            for( int y = 0; y < h; y++ )
                if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w*sizeof(pixel) ) )
                {
                    ok = 0;
                    fprintf( stderr, "plane_copy FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
                    break;
                }
        }
    }

    if( mc_a.plane_copy_swap != mc_ref.plane_copy_swap )
    {
        set_func_name( "plane_copy_swap" );
        used_asm = 1;
        for( int i = 0; i < sizeof(plane_specs)/sizeof(*plane_specs); i++ )
        {
            int w = (plane_specs[i].w + 1) >> 1;
            int h = plane_specs[i].h;
            intptr_t src_stride = plane_specs[i].src_stride;
            intptr_t dst_stride = (2*w + 127) & ~63;
            assert( dst_stride * h <= 0x1000 );
            pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
            memset( pbuf3, 0, 0x1000*sizeof(pixel) );
            memset( pbuf4, 0, 0x1000*sizeof(pixel) );
            call_c( mc_c.plane_copy_swap, pbuf3, dst_stride, src1, src_stride, w, h );
            call_a( mc_a.plane_copy_swap, pbuf4, dst_stride, src1, src_stride, w, h );
            for( int y = 0; y < h; y++ )
                if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, 2*w*sizeof(pixel) ) )
                {
                    ok = 0;
                    fprintf( stderr, "plane_copy_swap FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
                    break;
                }
        }
    }

    if( mc_a.plane_copy_interleave != mc_ref.plane_copy_interleave )
    {
        set_func_name( "plane_copy_interleave" );
        used_asm = 1;
        for( int i = 0; i < sizeof(plane_specs)/sizeof(*plane_specs); i++ )
        {
            int w = (plane_specs[i].w + 1) >> 1;
            int h = plane_specs[i].h;
            intptr_t src_stride = (plane_specs[i].src_stride + 1) >> 1;
            intptr_t dst_stride = (2*w + 127) & ~63;
            assert( dst_stride * h <= 0x1000 );
            pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
            memset( pbuf3, 0, 0x1000*sizeof(pixel) );
            memset( pbuf4, 0, 0x1000*sizeof(pixel) );
            call_c( mc_c.plane_copy_interleave, pbuf3, dst_stride, src1, src_stride, src1+1024, src_stride+16, w, h );
            call_a( mc_a.plane_copy_interleave, pbuf4, dst_stride, src1, src_stride, src1+1024, src_stride+16, w, h );
            for( int y = 0; y < h; y++ )
                if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, 2*w*sizeof(pixel) ) )
                {
                    ok = 0;
                    fprintf( stderr, "plane_copy_interleave FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
                    break;
                }
        }
    }

    if( mc_a.plane_copy_deinterleave != mc_ref.plane_copy_deinterleave )
    {
        set_func_name( "plane_copy_deinterleave" );
        used_asm = 1;
        for( int i = 0; i < sizeof(plane_specs)/sizeof(*plane_specs); i++ )
        {
            int w = (plane_specs[i].w + 1) >> 1;
            int h = plane_specs[i].h;
            intptr_t dst_stride = w;
            intptr_t src_stride = (2*w + 127) & ~63;
            intptr_t offv = (dst_stride*h + 31) & ~15;
            memset( pbuf3, 0, 0x1000 );
            memset( pbuf4, 0, 0x1000 );
            call_c( mc_c.plane_copy_deinterleave, pbuf3, dst_stride, pbuf3+offv, dst_stride, pbuf1, src_stride, w, h );
            call_a( mc_a.plane_copy_deinterleave, pbuf4, dst_stride, pbuf4+offv, dst_stride, pbuf1, src_stride, w, h );
            for( int y = 0; y < h; y++ )
                if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w ) ||
                    memcmp( pbuf3+y*dst_stride+offv, pbuf4+y*dst_stride+offv, w ) )
                {
                    ok = 0;
                    fprintf( stderr, "plane_copy_deinterleave FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
                    break;
                }
        }
    }

    if( mc_a.plane_copy_deinterleave_rgb != mc_ref.plane_copy_deinterleave_rgb )
    {
        set_func_name( "plane_copy_deinterleave_rgb" );
        used_asm = 1;
        for( int i = 0; i < sizeof(plane_specs)/sizeof(*plane_specs); i++ )
        {
            int w = (plane_specs[i].w + 2) >> 2;
            int h = plane_specs[i].h;
            intptr_t src_stride = plane_specs[i].src_stride;
            intptr_t dst_stride = ALIGN( w, 16 );
            intptr_t offv = dst_stride*h + 16;

            for( int pw = 3; pw <= 4; pw++ )
            {
                memset( pbuf3, 0, 0x1000 );
                memset( pbuf4, 0, 0x1000 );
                call_c( mc_c.plane_copy_deinterleave_rgb, pbuf3, dst_stride, pbuf3+offv, dst_stride, pbuf3+2*offv, dst_stride, pbuf1, src_stride, pw, w, h );
                call_a( mc_a.plane_copy_deinterleave_rgb, pbuf4, dst_stride, pbuf4+offv, dst_stride, pbuf4+2*offv, dst_stride, pbuf1, src_stride, pw, w, h );
                for( int y = 0; y < h; y++ )
                    if( memcmp( pbuf3+y*dst_stride+0*offv, pbuf4+y*dst_stride+0*offv, w ) ||
                        memcmp( pbuf3+y*dst_stride+1*offv, pbuf4+y*dst_stride+1*offv, w ) ||
                        memcmp( pbuf3+y*dst_stride+2*offv, pbuf4+y*dst_stride+2*offv, w ) )
                    {
                        ok = 0;
                        fprintf( stderr, "plane_copy_deinterleave_rgb FAILED: w=%d h=%d stride=%d pw=%d\n", w, h, (int)src_stride, pw );
                        break;
                    }
            }
        }
    }
    report( "plane_copy :" );

    if( mc_a.plane_copy_deinterleave_v210 != mc_ref.plane_copy_deinterleave_v210 )
    {
        set_func_name( "plane_copy_deinterleave_v210" );
        ok = 1; used_asm = 1;
        for( int i = 0; i < sizeof(plane_specs)/sizeof(*plane_specs); i++ )
        {
            int w = (plane_specs[i].w + 1) >> 1;
            int h = plane_specs[i].h;
            intptr_t dst_stride = ALIGN( w, 16 );
            intptr_t src_stride = (w + 47) / 48 * 128 / sizeof(uint32_t);
            intptr_t offv = dst_stride*h + 32;
            memset( pbuf3, 0, 0x1000 );
            memset( pbuf4, 0, 0x1000 );
            call_c( mc_c.plane_copy_deinterleave_v210, pbuf3, dst_stride, pbuf3+offv, dst_stride, (uint32_t *)buf1, src_stride, w, h );
            call_a( mc_a.plane_copy_deinterleave_v210, pbuf4, dst_stride, pbuf4+offv, dst_stride, (uint32_t *)buf1, src_stride, w, h );
            for( int y = 0; y < h; y++ )
                if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w*sizeof(uint16_t) ) ||
                    memcmp( pbuf3+y*dst_stride+offv, pbuf4+y*dst_stride+offv, w*sizeof(uint16_t) ) )
                {
                    ok = 0;
                    fprintf( stderr, "plane_copy_deinterleave_v210 FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
                    break;
                }
        }
        report( "v210 :" );
    }

    if( mc_a.hpel_filter != mc_ref.hpel_filter )
    {
        pixel *srchpel = pbuf1+8+2*64;
        pixel *dstc[3] = { pbuf3+8, pbuf3+8+16*64, pbuf3+8+32*64 };
        pixel *dsta[3] = { pbuf4+8, pbuf4+8+16*64, pbuf4+8+32*64 };
        void *tmp = pbuf3+49*64;
        set_func_name( "hpel_filter" );
        ok = 1; used_asm = 1;
        memset( pbuf3, 0, 4096 * sizeof(pixel) );
        memset( pbuf4, 0, 4096 * sizeof(pixel) );
        call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], srchpel, (intptr_t)64, 48, 10, tmp );
        call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], srchpel, (intptr_t)64, 48, 10, tmp );
        for( int i = 0; i < 3; i++ )
            for( int j = 0; j < 10; j++ )
                //FIXME ideally the first pixels would match too, but they aren't actually used
                if( memcmp( dstc[i]+j*64+2, dsta[i]+j*64+2, 43 * sizeof(pixel) ) )
                {
                    ok = 0;
                    fprintf( stderr, "hpel filter differs at plane %c line %d\n", "hvc"[i], j );
                    for( int k = 0; k < 48; k++ )
                        printf( "%02x%s", dstc[i][j*64+k], (k+1)&3 ? "" : " " );
                    printf( "\n" );
                    for( int k = 0; k < 48; k++ )
                        printf( "%02x%s", dsta[i][j*64+k], (k+1)&3 ? "" : " " );
                    printf( "\n" );
                    break;
                }
        report( "hpel filter :" );
    }
1606
1607
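
    /* frame_init_lowres_core downscales a frame by 2x into four planes, one per
     * half-pel phase, which the lookahead uses for its lowres motion search. */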
    if( mc_a.frame_init_lowres_core != mc_ref.frame_init_lowres_core )
    {
        pixel *dstc[4] = { pbuf3, pbuf3+1024, pbuf3+2048, pbuf3+3072 };
        pixel *dsta[4] = { pbuf4, pbuf4+1024, pbuf4+2048, pbuf4+3072 };
        set_func_name( "lowres_init" );
        ok = 1; used_asm = 1;
        for( int w = 96; w <= 96+24; w += 8 )
        {
            intptr_t stride = (w*2+31)&~31;
            intptr_t stride_lowres = (w+31)&~31;
            call_c( mc_c.frame_init_lowres_core, pbuf1, dstc[0], dstc[1], dstc[2], dstc[3], stride, stride_lowres, w, 8 );
            call_a( mc_a.frame_init_lowres_core, pbuf1, dsta[0], dsta[1], dsta[2], dsta[3], stride, stride_lowres, w, 8 );
            for( int i = 0; i < 8; i++ )
            {
                for( int j = 0; j < 4; j++ )
                    if( memcmp( dstc[j]+i*stride_lowres, dsta[j]+i*stride_lowres, w * sizeof(pixel) ) )
                    {
                        ok = 0;
                        fprintf( stderr, "frame_init_lowres differs at plane %d line %d\n", j, i );
                        for( int k = 0; k < w; k++ )
                            printf( "%d ", dstc[j][k+i*stride_lowres] );
                        printf( "\n" );
                        for( int k = 0; k < w; k++ )
                            printf( "%d ", dsta[j][k+i*stride_lowres] );
                        printf( "\n" );
                        break;
                    }
            }
        }
        report( "lowres init :" );
    }
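
/* The integral_init* kernels build rows of summed-area (integral) tables;
 * x264 uses these to prune candidates in the exhaustive ESA/TESA motion
 * search. Each variant below is one horizontal or vertical accumulation step. */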
#define INTEGRAL_INIT( name, size, offset, cmp_len, ... )\
    if( mc_a.name != mc_ref.name )\
    {\
        intptr_t stride = 96;\
        set_func_name( #name );\
        used_asm = 1;\
        memcpy( buf3, buf1, size*2*stride );\
        memcpy( buf4, buf1, size*2*stride );\
        uint16_t *sum = (uint16_t*)buf3;\
        call_c1( mc_c.name, sum+offset, __VA_ARGS__ );\
        sum = (uint16_t*)buf4;\
        call_a1( mc_a.name, sum+offset, __VA_ARGS__ );\
        if( memcmp( buf3+2*offset, buf4+2*offset, cmp_len*2 )\
            || (size>9 && memcmp( buf3+18*stride, buf4+18*stride, (stride-8)*2 )))\
            ok = 0;\
        call_c2( mc_c.name, sum+offset, __VA_ARGS__ );\
        call_a2( mc_a.name, sum+offset, __VA_ARGS__ );\
    }
    ok = 1; used_asm = 0;
    INTEGRAL_INIT( integral_init4h, 2, stride, stride-4, pbuf2, stride );
    INTEGRAL_INIT( integral_init8h, 2, stride, stride-8, pbuf2, stride );
    INTEGRAL_INIT( integral_init4v, 14, 0, stride-8, sum+9*stride, stride );
    INTEGRAL_INIT( integral_init8v, 9, 0, stride-8, stride );
    report( "integral init :" );
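
    /* mbtree_propagate_cost mixes integer and floating-point math, so exact
     * rounding is implementation-dependent; results are compared below within
     * a small tolerance rather than bit-exactly. */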
    ok = 1; used_asm = 0;
    if( mc_a.mbtree_propagate_cost != mc_ref.mbtree_propagate_cost )
    {
        used_asm = 1;
        x264_emms();
        for( int i = 0; i < 10; i++ )
        {
            float fps_factor = (rand()&65535) / 65535.0f;
            set_func_name( "mbtree_propagate_cost" );
            int16_t *dsta = (int16_t*)buf3;
            int16_t *dstc = dsta+400;
            uint16_t *prop = (uint16_t*)buf1;
            uint16_t *intra = (uint16_t*)buf4;
            uint16_t *inter = intra+128;
            uint16_t *qscale = inter+128;
            uint16_t *rnd = (uint16_t*)buf2;
            x264_emms();
            for( int j = 0; j < 100; j++ )
            {
                intra[j] = *rnd++ & 0x7fff;
                intra[j] += !intra[j];
                inter[j] = *rnd++ & 0x7fff;
                qscale[j] = *rnd++ & 0x7fff;
            }
            call_c( mc_c.mbtree_propagate_cost, dstc, prop, intra, inter, qscale, &fps_factor, 100 );
            call_a( mc_a.mbtree_propagate_cost, dsta, prop, intra, inter, qscale, &fps_factor, 100 );
            // I don't care about exact rounding, this is just how close the floating-point implementation happens to be
            x264_emms();
            for( int j = 0; j < 100 && ok; j++ )
            {
                ok &= abs( dstc[j]-dsta[j] ) <= 1 || fabs( (double)dstc[j]/dsta[j]-1 ) < 1e-4;
                if( !ok )
                    fprintf( stderr, "mbtree_propagate_cost FAILED: %f !~= %f\n", (double)dstc[j], (double)dsta[j] );
            }
        }
    }
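
    /* mbtree_propagate_list scatters propagate_amount into ref_costs through
     * the motion vectors; a per-cost difference of 1 is tolerated, presumably
     * because the asm may round intermediate weights differently. */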
    if( mc_a.mbtree_propagate_list != mc_ref.mbtree_propagate_list )
    {
        used_asm = 1;
        for( int i = 0; i < 8; i++ )
        {
            set_func_name( "mbtree_propagate_list" );
            x264_t h;
            int height = 4;
            int width = 128;
            int size = width*height;
            h.mb.i_mb_stride = width;
            h.mb.i_mb_width = width;
            h.mb.i_mb_height = height;

            uint16_t *ref_costsc = (uint16_t*)buf3;
            uint16_t *ref_costsa = (uint16_t*)buf4;
            int16_t (*mvs)[2] = (int16_t(*)[2])(ref_costsc + size);
            int16_t *propagate_amount = (int16_t*)(mvs + width);
            uint16_t *lowres_costs = (uint16_t*)(propagate_amount + width);
            h.scratch_buffer2 = (uint8_t*)(ref_costsa + size);
            int bipred_weight = (rand()%63)+1;
            int list = i&1;
            for( int j = 0; j < size; j++ )
                ref_costsc[j] = ref_costsa[j] = rand()&32767;
            for( int j = 0; j < width; j++ )
            {
                static const uint8_t list_dist[2][8] = {{0,1,1,1,1,1,1,1},{1,1,3,3,3,3,3,2}};
                for( int k = 0; k < 2; k++ )
                    mvs[j][k] = (rand()&127) - 64;
                propagate_amount[j] = rand()&32767;
                lowres_costs[j] = list_dist[list][rand()&7] << LOWRES_COST_SHIFT;
            }

            call_c1( mc_c.mbtree_propagate_list, &h, ref_costsc, mvs, propagate_amount, lowres_costs, bipred_weight, 0, width, list );
            call_a1( mc_a.mbtree_propagate_list, &h, ref_costsa, mvs, propagate_amount, lowres_costs, bipred_weight, 0, width, list );

            for( int j = 0; j < size && ok; j++ )
            {
                ok &= abs(ref_costsa[j] - ref_costsc[j]) <= 1;
                if( !ok )
                    fprintf( stderr, "mbtree_propagate_list FAILED at %d: %d !~= %d\n", j, ref_costsc[j], ref_costsa[j] );
            }

            call_c2( mc_c.mbtree_propagate_list, &h, ref_costsc, mvs, propagate_amount, lowres_costs, bipred_weight, 0, width, list );
            call_a2( mc_a.mbtree_propagate_list, &h, ref_costsa, mvs, propagate_amount, lowres_costs, bipred_weight, 0, width, list );
        }
    }
    report( "mbtree :" );
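
    /* buf4 is pre-filled with 0xAA and the byte just past the requested size is
     * checked afterwards, to catch implementations that write out of bounds. */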
    if( mc_a.memcpy_aligned != mc_ref.memcpy_aligned )
    {
        set_func_name( "memcpy_aligned" );
        ok = 1; used_asm = 1;
        for( size_t size = 16; size < 256; size += 16 )
        {
            memset( buf4, 0xAA, size + 1 );
            call_c( mc_c.memcpy_aligned, buf3, buf1, size );
            call_a( mc_a.memcpy_aligned, buf4, buf1, size );
            if( memcmp( buf3, buf4, size ) || buf4[size] != 0xAA )
            {
                ok = 0;
                fprintf( stderr, "memcpy_aligned FAILED: size=%d\n", (int)size );
                break;
            }
        }
        report( "memcpy aligned :" );
    }

    if( mc_a.memzero_aligned != mc_ref.memzero_aligned )
    {
        set_func_name( "memzero_aligned" );
        ok = 1; used_asm = 1;
        for( size_t size = 128; size < 1024; size += 128 )
        {
            memset( buf4, 0xAA, size + 1 );
            call_c( mc_c.memzero_aligned, buf3, size );
            call_a( mc_a.memzero_aligned, buf4, size );
            if( memcmp( buf3, buf4, size ) || buf4[size] != 0xAA )
            {
                ok = 0;
                fprintf( stderr, "memzero_aligned FAILED: size=%d\n", (int)size );
                break;
            }
        }
        report( "memzero aligned :" );
    }

    return ret;
}

static int check_deblock( int cpu_ref, int cpu_new )
{
    x264_deblock_function_t db_c;
    x264_deblock_function_t db_ref;
    x264_deblock_function_t db_a;
    int ret = 0, ok = 1, used_asm = 0;
    int alphas[36], betas[36];
    int8_t tcs[36][4];

    x264_deblock_init( 0, &db_c, 0 );
    x264_deblock_init( cpu_ref, &db_ref, 0 );
    x264_deblock_init( cpu_new, &db_a, 0 );

    /* not exactly the real values of a,b,tc but close enough */
    for( int i = 35, a = 255, c = 250; i >= 0; i-- )
    {
        alphas[i] = a << (BIT_DEPTH-8);
        betas[i] = (i+1)/2 << (BIT_DEPTH-8);
        tcs[i][0] = tcs[i][3] = (c+6)/10 << (BIT_DEPTH-8);
        tcs[i][1] = (c+7)/15 << (BIT_DEPTH-8);
        tcs[i][2] = (c+9)/20 << (BIT_DEPTH-8);
        a = a*9/10;
        c = c*9/10;
    }
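
/* Run each deblock function over all 36 (alpha,beta) strength pairs and two
 * pixel distributions; for horizontal filters, 'off' also varies the alignment
 * of the filtered edge. */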
#define TEST_DEBLOCK( name, align, ... ) \
    for( int i = 0; i < 36; i++ ) \
    { \
        intptr_t off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */ \
        for( int j = 0; j < 1024; j++ ) \
            /* two distributions of random to exercise different failure modes */ \
            pbuf3[j] = rand() & (i&1 ? 0xf : PIXEL_MAX ); \
        memcpy( pbuf4, pbuf3, 1024 * sizeof(pixel) ); \
        if( db_a.name != db_ref.name ) \
        { \
            set_func_name( #name ); \
            used_asm = 1; \
            call_c1( db_c.name, pbuf3+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            call_a1( db_a.name, pbuf4+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            if( memcmp( pbuf3, pbuf4, 1024 * sizeof(pixel) ) ) \
            { \
                ok = 0; \
                fprintf( stderr, #name "(a=%d, b=%d): [FAILED]\n", alphas[i], betas[i] ); \
                break; \
            } \
            call_c2( db_c.name, pbuf3+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
            call_a2( db_a.name, pbuf4+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
        } \
    }

    TEST_DEBLOCK( deblock_luma[0], 0, tcs[i] );
    TEST_DEBLOCK( deblock_luma[1], 1, tcs[i] );
    TEST_DEBLOCK( deblock_h_chroma_420, 0, tcs[i] );
    TEST_DEBLOCK( deblock_h_chroma_422, 0, tcs[i] );
    TEST_DEBLOCK( deblock_chroma_420_mbaff, 0, tcs[i] );
    TEST_DEBLOCK( deblock_chroma_422_mbaff, 0, tcs[i] );
    TEST_DEBLOCK( deblock_chroma[1], 1, tcs[i] );
    TEST_DEBLOCK( deblock_luma_intra[0], 0 );
    TEST_DEBLOCK( deblock_luma_intra[1], 1 );
    TEST_DEBLOCK( deblock_h_chroma_420_intra, 0 );
    TEST_DEBLOCK( deblock_h_chroma_422_intra, 0 );
    TEST_DEBLOCK( deblock_chroma_420_intra_mbaff, 0 );
    TEST_DEBLOCK( deblock_chroma_422_intra_mbaff, 0 );
    TEST_DEBLOCK( deblock_chroma_intra[1], 1 );

    if( db_a.deblock_strength != db_ref.deblock_strength )
    {
        for( int i = 0; i < 100; i++ )
        {
            ALIGNED_ARRAY_16( uint8_t, nnz, [X264_SCAN8_SIZE] );
            ALIGNED_4( int8_t ref[2][X264_SCAN8_LUMA_SIZE] );
            ALIGNED_ARRAY_16( int16_t, mv, [2],[X264_SCAN8_LUMA_SIZE][2] );
            ALIGNED_ARRAY_N( uint8_t, bs, [2],[2][8][4] );
            memset( bs, 99, sizeof(uint8_t)*2*4*8*2 );
            for( int j = 0; j < X264_SCAN8_SIZE; j++ )
                nnz[j] = ((rand()&7) == 7) * rand() & 0xf;
            for( int j = 0; j < 2; j++ )
                for( int k = 0; k < X264_SCAN8_LUMA_SIZE; k++ )
                {
                    ref[j][k] = ((rand()&3) != 3) ? 0 : (rand() & 31) - 2;
                    for( int l = 0; l < 2; l++ )
                        mv[j][k][l] = ((rand()&7) != 7) ? (rand()&7) - 3 : (rand()&1023) - 512;
                }
            set_func_name( "deblock_strength" );
            call_c( db_c.deblock_strength, nnz, ref, mv, bs[0], 2<<(i&1), ((i>>1)&1) );
            call_a( db_a.deblock_strength, nnz, ref, mv, bs[1], 2<<(i&1), ((i>>1)&1) );
            if( memcmp( bs[0], bs[1], sizeof(uint8_t)*2*4*8 ) )
            {
                ok = 0;
                fprintf( stderr, "deblock_strength: [FAILED]\n" );
                for( int j = 0; j < 2; j++ )
                {
                    for( int k = 0; k < 2; k++ )
                        for( int l = 0; l < 4; l++ )
                        {
                            for( int m = 0; m < 4; m++ )
                                printf("%d ",bs[j][k][l][m]);
                            printf("\n");
                        }
                    printf("\n");
                }
                break;
            }
        }
    }

    report( "deblock :" );

    return ret;
}

static int check_quant( int cpu_ref, int cpu_new )
{
    x264_quant_function_t qf_c;
    x264_quant_function_t qf_ref;
    x264_quant_function_t qf_a;
    ALIGNED_ARRAY_N( dctcoef, dct1,[64] );
    ALIGNED_ARRAY_N( dctcoef, dct2,[64] );
    ALIGNED_ARRAY_N( dctcoef, dct3,[8],[16] );
    ALIGNED_ARRAY_N( dctcoef, dct4,[8],[16] );
    ALIGNED_ARRAY_N( uint8_t, cqm_buf,[64] );
    int ret = 0, ok, used_asm;
    int oks[3] = {1,1,1}, used_asms[3] = {0,0,0};
    x264_t h_buf;
    x264_t *h = &h_buf;
    memset( h, 0, sizeof(*h) );
    h->sps->i_chroma_format_idc = 1;
    x264_param_default( &h->param );
    h->chroma_qp_table = i_chroma_qp_table + 12;
    h->param.analyse.b_transform_8x8 = 1;

    for( int i_cqm = 0; i_cqm < 4; i_cqm++ )
    {
        if( i_cqm == 0 )
        {
            for( int i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_flat16;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_FLAT;
        }
        else if( i_cqm == 1 )
        {
            for( int i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = x264_cqm_jvt[i];
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_JVT;
        }
        else
        {
            int max_scale = BIT_DEPTH < 10 ? 255 : 228;
            if( i_cqm == 2 )
                for( int i = 0; i < 64; i++ )
                    cqm_buf[i] = 10 + rand() % (max_scale - 9);
            else
                for( int i = 0; i < 64; i++ )
                    cqm_buf[i] = 1;
            for( int i = 0; i < 6; i++ )
                h->pps->scaling_list[i] = cqm_buf;
            h->param.i_cqm_preset = h->pps->i_cqm_preset = X264_CQM_CUSTOM;
        }

        h->param.rc.i_qp_min = 0;
        h->param.rc.i_qp_max = QP_MAX_SPEC;
        x264_cqm_init( h );
        x264_quant_init( h, 0, &qf_c );
        x264_quant_init( h, cpu_ref, &qf_ref );
        x264_quant_init( h, cpu_new, &qf_a );
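
/* scale1d holds the per-position gains of the 1D transform rows/columns, so
 * the scale products below roughly bound the largest value each coefficient
 * position can legally take; random inputs then cover that whole range. */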
#define INIT_QUANT8(j,max) \
        { \
            static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
            for( int i = 0; i < max; i++ ) \
            { \
                unsigned int scale = (255*scale1d[(i>>3)&7]*scale1d[i&7])/16; \
                dct1[i] = dct2[i] = (j>>(i>>6))&1 ? (rand()%(2*scale+1))-scale : 0; \
            } \
        }

#define INIT_QUANT4(j,max) \
        { \
            static const int scale1d[4] = {4,6,4,6}; \
            for( int i = 0; i < max; i++ ) \
            { \
                unsigned int scale = 255*scale1d[(i>>2)&3]*scale1d[i&3]; \
                dct1[i] = dct2[i] = (j>>(i>>4))&1 ? (rand()%(2*scale+1))-scale : 0; \
            } \
        }

#define TEST_QUANT_DC( name, cqm ) \
        if( qf_a.name != qf_ref.name ) \
        { \
            set_func_name( #name ); \
            used_asms[0] = 1; \
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
            { \
                for( int j = 0; j < 2; j++ ) \
                { \
                    int result_c, result_a; \
                    for( int i = 0; i < 16; i++ ) \
                        dct1[i] = dct2[i] = j ? (rand() & 0x1fff) - 0xfff : 0; \
                    result_c = call_c1( qf_c.name, dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                    result_a = call_a1( qf_a.name, dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                    if( memcmp( dct1, dct2, 16*sizeof(dctcoef) ) || result_c != result_a ) \
                    { \
                        oks[0] = 0; \
                        fprintf( stderr, #name "(cqm=%d): [FAILED]\n", i_cqm ); \
                        break; \
                    } \
                    call_c2( qf_c.name, dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                    call_a2( qf_a.name, dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
                } \
            } \
        }

#define TEST_QUANT( qname, block, type, w, maxj ) \
        if( qf_a.qname != qf_ref.qname ) \
        { \
            set_func_name( #qname ); \
            used_asms[0] = 1; \
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
            { \
                for( int j = 0; j < maxj; j++ ) \
                { \
                    INIT_QUANT##type(j, w*w) \
                    int result_c = call_c1( qf_c.qname, (void*)dct1, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
                    int result_a = call_a1( qf_a.qname, (void*)dct2, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
                    if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) || result_c != result_a ) \
                    { \
                        oks[0] = 0; \
                        fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                        break; \
                    } \
                    call_c2( qf_c.qname, (void*)dct1, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
                    call_a2( qf_a.qname, (void*)dct2, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
                } \
            } \
        }

        TEST_QUANT( quant_8x8, CQM_8IY, 8, 8, 2 );
        TEST_QUANT( quant_8x8, CQM_8PY, 8, 8, 2 );
        TEST_QUANT( quant_4x4, CQM_4IY, 4, 4, 2 );
        TEST_QUANT( quant_4x4, CQM_4PY, 4, 4, 2 );
        TEST_QUANT( quant_4x4x4, CQM_4IY, 4, 8, 16 );
        TEST_QUANT( quant_4x4x4, CQM_4PY, 4, 8, 16 );
        TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
        TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );

#define TEST_DEQUANT( qname, dqname, block, w ) \
        if( qf_a.dqname != qf_ref.dqname ) \
        { \
            set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
            used_asms[1] = 1; \
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
            { \
                INIT_QUANT##w(1, w*w) \
                qf_c.qname( dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
                memcpy( dct2, dct1, w*w*sizeof(dctcoef) ); \
                call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
                call_a1( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
                if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) ) \
                { \
                    oks[1] = 0; \
                    fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                    break; \
                } \
                call_c2( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
                call_a2( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
            } \
        }

        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8IY, 8 );
        TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8PY, 8 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4IY, 4 );
        TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4PY, 4 );

#define TEST_DEQUANT_DC( qname, dqname, block, w ) \
        if( qf_a.dqname != qf_ref.dqname ) \
        { \
            set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
            used_asms[1] = 1; \
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
            { \
                for( int i = 0; i < 16; i++ ) \
                    dct1[i] = rand()%(PIXEL_MAX*16*2+1) - PIXEL_MAX*16; \
                qf_c.qname( dct1, h->quant##w##_mf[block][qp][0]>>1, h->quant##w##_bias[block][qp][0]>>1 ); \
                memcpy( dct2, dct1, w*w*sizeof(dctcoef) ); \
                call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
                call_a1( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
                if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) ) \
                { \
                    oks[1] = 0; \
                    fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
                } \
                call_c2( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
                call_a2( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
            } \
        }

        TEST_DEQUANT_DC( quant_4x4_dc, dequant_4x4_dc, CQM_4IY, 4 );

        if( qf_a.idct_dequant_2x4_dc != qf_ref.idct_dequant_2x4_dc )
        {
            set_func_name( "idct_dequant_2x4_dc_%s", i_cqm?"cqm":"flat" );
            used_asms[1] = 1;
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- )
            {
                for( int i = 0; i < 8; i++ )
                    dct1[i] = rand()%(PIXEL_MAX*16*2+1) - PIXEL_MAX*16;
                qf_c.quant_2x2_dc( &dct1[0], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
                qf_c.quant_2x2_dc( &dct1[4], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
                call_c( qf_c.idct_dequant_2x4_dc, dct1, dct3, h->dequant4_mf[CQM_4IC], qp+3 );
                call_a( qf_a.idct_dequant_2x4_dc, dct1, dct4, h->dequant4_mf[CQM_4IC], qp+3 );
                for( int i = 0; i < 8; i++ )
                    if( dct3[i][0] != dct4[i][0] )
                    {
                        oks[1] = 0;
                        fprintf( stderr, "idct_dequant_2x4_dc (qp=%d, cqm=%d): [FAILED]\n", qp, i_cqm );
                        break;
                    }
            }
        }

        if( qf_a.idct_dequant_2x4_dconly != qf_ref.idct_dequant_2x4_dconly )
        {
            set_func_name( "idct_dequant_2x4_dconly_%s", i_cqm?"cqm":"flat" );
            used_asms[1] = 1;
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- )
            {
                for( int i = 0; i < 8; i++ )
                    dct1[i] = rand()%(PIXEL_MAX*16*2+1) - PIXEL_MAX*16;
                qf_c.quant_2x2_dc( &dct1[0], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
                qf_c.quant_2x2_dc( &dct1[4], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
                memcpy( dct2, dct1, 8*sizeof(dctcoef) );
                call_c1( qf_c.idct_dequant_2x4_dconly, dct1, h->dequant4_mf[CQM_4IC], qp+3 );
                call_a1( qf_a.idct_dequant_2x4_dconly, dct2, h->dequant4_mf[CQM_4IC], qp+3 );
                if( memcmp( dct1, dct2, 8*sizeof(dctcoef) ) )
                {
                    oks[1] = 0;
                    fprintf( stderr, "idct_dequant_2x4_dconly (qp=%d, cqm=%d): [FAILED]\n", qp, i_cqm );
                    break;
                }
                call_c2( qf_c.idct_dequant_2x4_dconly, dct1, h->dequant4_mf[CQM_4IC], qp+3 );
                call_a2( qf_a.idct_dequant_2x4_dconly, dct2, h->dequant4_mf[CQM_4IC], qp+3 );
            }
        }
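
/* optimize_chroma_*_dc tries to shrink chroma DC levels toward zero where the
 * dequantized result is unchanged; input magnitudes are grown geometrically
 * (i <<= 1) so both small and near-maximal coefficients are exercised. */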
#define TEST_OPTIMIZE_CHROMA_DC( optname, size ) \
        if( qf_a.optname != qf_ref.optname ) \
        { \
            set_func_name( #optname ); \
            used_asms[2] = 1; \
            for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
            { \
                int qpdc = qp + (size == 8 ? 3 : 0); \
                int dmf = h->dequant4_mf[CQM_4IC][qpdc%6][0] << qpdc/6; \
                if( dmf > 32*64 ) \
                    continue; \
                for( int i = 16; ; i <<= 1 ) \
                { \
                    int res_c, res_asm; \
                    int max = X264_MIN( i, PIXEL_MAX*16 ); \
                    for( int j = 0; j < size; j++ ) \
                        dct1[j] = rand()%(max*2+1) - max; \
                    for( int j = 0; j < size; j += 4 ) \
                        qf_c.quant_2x2_dc( &dct1[j], h->quant4_mf[CQM_4IC][qpdc][0]>>1, h->quant4_bias[CQM_4IC][qpdc][0]>>1 ); \
                    memcpy( dct2, dct1, size*sizeof(dctcoef) ); \
                    res_c = call_c1( qf_c.optname, dct1, dmf ); \
                    res_asm = call_a1( qf_a.optname, dct2, dmf ); \
                    if( res_c != res_asm || memcmp( dct1, dct2, size*sizeof(dctcoef) ) ) \
                    { \
                        oks[2] = 0; \
                        fprintf( stderr, #optname "(qp=%d, res_c=%d, res_asm=%d): [FAILED]\n", qp, res_c, res_asm ); \
                    } \
                    call_c2( qf_c.optname, dct1, dmf ); \
                    call_a2( qf_a.optname, dct2, dmf ); \
                    if( i >= PIXEL_MAX*16 ) \
                        break; \
                } \
            } \
        }

        TEST_OPTIMIZE_CHROMA_DC( optimize_chroma_2x2_dc, 4 );
        TEST_OPTIMIZE_CHROMA_DC( optimize_chroma_2x4_dc, 8 );

        x264_cqm_delete( h );
    }

    ok = oks[0]; used_asm = used_asms[0];
    report( "quant :" );

    ok = oks[1]; used_asm = used_asms[1];
    report( "dequant :" );

    ok = oks[2]; used_asm = used_asms[2];
    report( "optimize chroma dc :" );

    ok = 1; used_asm = 0;
    if( qf_a.denoise_dct != qf_ref.denoise_dct )
    {
        used_asm = 1;
        for( int size = 16; size <= 64; size += 48 )
        {
            set_func_name( "denoise_dct" );
            memcpy( dct1, buf1, size*sizeof(dctcoef) );
            memcpy( dct2, buf1, size*sizeof(dctcoef) );
            memcpy( buf3+256, buf3, 256 );
            call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
            call_a1( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (udctcoef*)buf2, size );
            if( memcmp( dct1, dct2, size*sizeof(dctcoef) ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
                ok = 0;
            call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
            call_a2( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (udctcoef*)buf2, size );
        }
    }
    report( "denoise dct :" );

#define TEST_DECIMATE( decname, w, ac, thresh ) \
    if( qf_a.decname != qf_ref.decname ) \
    { \
        set_func_name( #decname ); \
        used_asm = 1; \
        for( int i = 0; i < 100; i++ ) \
        { \
            static const int distrib[16] = {1,1,1,1,1,1,1,1,1,1,1,1,2,3,4};\
            static const int zerorate_lut[4] = {3,7,15,31};\
            int zero_rate = zerorate_lut[i&3];\
            for( int idx = 0; idx < w*w; idx++ ) \
            { \
                int sign = (rand()&1) ? -1 : 1; \
                int abs_level = distrib[rand()&15]; \
                if( abs_level == 4 ) abs_level = rand()&0x3fff; \
                int zero = !(rand()&zero_rate); \
                dct1[idx] = zero * abs_level * sign; \
            } \
            if( ac ) \
                dct1[0] = 0; \
            int result_c = call_c( qf_c.decname, dct1 ); \
            int result_a = call_a( qf_a.decname, dct1 ); \
            if( X264_MIN(result_c,thresh) != X264_MIN(result_a,thresh) ) \
            { \
                ok = 0; \
                fprintf( stderr, #decname ": [FAILED]\n" ); \
                break; \
            } \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_DECIMATE( decimate_score64, 8, 0, 6 );
    TEST_DECIMATE( decimate_score16, 4, 0, 6 );
    TEST_DECIMATE( decimate_score15, 4, 1, 7 );
    report( "decimate_score :" );

#define TEST_LAST( last, lastname, size, ac ) \
    if( qf_a.last != qf_ref.last ) \
    { \
        set_func_name( #lastname ); \
        used_asm = 1; \
        for( int i = 0; i < 100; i++ ) \
        { \
            int nnz = 0; \
            int max = rand() & (size-1); \
            memset( dct1, 0, size*sizeof(dctcoef) ); \
            for( int idx = ac; idx < max; idx++ ) \
                nnz |= dct1[idx] = !(rand()&3) + (!(rand()&15))*rand(); \
            if( !nnz ) \
                dct1[ac] = 1; \
            int result_c = call_c( qf_c.last, dct1+ac ); \
            int result_a = call_a( qf_a.last, dct1+ac ); \
            if( result_c != result_a ) \
            { \
                ok = 0; \
                fprintf( stderr, #lastname ": [FAILED]\n" ); \
                break; \
            } \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_LAST( coeff_last4 , coeff_last4, 4, 0 );
    TEST_LAST( coeff_last8 , coeff_last8, 8, 0 );
    TEST_LAST( coeff_last[ DCT_LUMA_AC], coeff_last15, 16, 1 );
    TEST_LAST( coeff_last[ DCT_LUMA_4x4], coeff_last16, 16, 0 );
    TEST_LAST( coeff_last[ DCT_LUMA_8x8], coeff_last64, 64, 0 );
    report( "coeff_last :" );

#define TEST_LEVELRUN( lastname, name, size, ac ) \
    if( qf_a.lastname != qf_ref.lastname ) \
    { \
        set_func_name( #name ); \
        used_asm = 1; \
        for( int i = 0; i < 100; i++ ) \
        { \
            x264_run_level_t runlevel_c, runlevel_a; \
            int nnz = 0; \
            int max = rand() & (size-1); \
            memset( dct1, 0, size*sizeof(dctcoef) ); \
            memcpy( &runlevel_a, buf1+i, sizeof(x264_run_level_t) ); \
            memcpy( &runlevel_c, buf1+i, sizeof(x264_run_level_t) ); \
            for( int idx = ac; idx < max; idx++ ) \
                nnz |= dct1[idx] = !(rand()&3) + (!(rand()&15))*rand(); \
            if( !nnz ) \
                dct1[ac] = 1; \
            int result_c = call_c( qf_c.lastname, dct1+ac, &runlevel_c ); \
            int result_a = call_a( qf_a.lastname, dct1+ac, &runlevel_a ); \
            if( result_c != result_a || runlevel_c.last != runlevel_a.last || \
                runlevel_c.mask != runlevel_a.mask || \
                memcmp(runlevel_c.level, runlevel_a.level, sizeof(dctcoef)*result_c)) \
            { \
                ok = 0; \
                fprintf( stderr, #name ": [FAILED]\n" ); \
                break; \
            } \
        } \
    }

    ok = 1; used_asm = 0;
    TEST_LEVELRUN( coeff_level_run4 , coeff_level_run4, 4, 0 );
    TEST_LEVELRUN( coeff_level_run8 , coeff_level_run8, 8, 0 );
    TEST_LEVELRUN( coeff_level_run[ DCT_LUMA_AC], coeff_level_run15, 16, 1 );
    TEST_LEVELRUN( coeff_level_run[ DCT_LUMA_4x4], coeff_level_run16, 16, 0 );
    report( "coeff_level_run :" );

    return ret;
}

static int check_intra( int cpu_ref, int cpu_new )
{
    int ret = 0, ok = 1, used_asm = 0;
    ALIGNED_ARRAY_32( pixel, edge,[36] );
    ALIGNED_ARRAY_32( pixel, edge2,[36] );
    ALIGNED_ARRAY_32( pixel, fdec,[FDEC_STRIDE*20] );
    struct
    {
        x264_predict_t predict_16x16[4+3];
        x264_predict_t predict_8x8c[4+3];
        x264_predict_t predict_8x16c[4+3];
        x264_predict8x8_t predict_8x8[9+3];
        x264_predict_t predict_4x4[9+3];
        x264_predict_8x8_filter_t predict_8x8_filter;
    } ip_c, ip_ref, ip_a;

    x264_predict_16x16_init( 0, ip_c.predict_16x16 );
    x264_predict_8x8c_init( 0, ip_c.predict_8x8c );
    x264_predict_8x16c_init( 0, ip_c.predict_8x16c );
    x264_predict_8x8_init( 0, ip_c.predict_8x8, &ip_c.predict_8x8_filter );
    x264_predict_4x4_init( 0, ip_c.predict_4x4 );

    x264_predict_16x16_init( cpu_ref, ip_ref.predict_16x16 );
    x264_predict_8x8c_init( cpu_ref, ip_ref.predict_8x8c );
    x264_predict_8x16c_init( cpu_ref, ip_ref.predict_8x16c );
    x264_predict_8x8_init( cpu_ref, ip_ref.predict_8x8, &ip_ref.predict_8x8_filter );
    x264_predict_4x4_init( cpu_ref, ip_ref.predict_4x4 );

    x264_predict_16x16_init( cpu_new, ip_a.predict_16x16 );
    x264_predict_8x8c_init( cpu_new, ip_a.predict_8x8c );
    x264_predict_8x16c_init( cpu_new, ip_a.predict_8x16c );
    x264_predict_8x8_init( cpu_new, ip_a.predict_8x8, &ip_a.predict_8x8_filter );
    x264_predict_4x4_init( cpu_new, ip_a.predict_4x4 );

    memcpy( fdec, pbuf1, 32*20 * sizeof(pixel) );

    ip_c.predict_8x8_filter( fdec+48, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
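
/* INTRA_TEST compares the whole FDEC_STRIDE*20 buffer rather than just the
 * predicted block, so stray writes outside the target block also fail. */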
#define INTRA_TEST( name, dir, w, h, align, bench, ... )\
    if( ip_a.name[dir] != ip_ref.name[dir] )\
    {\
        set_func_name( "intra_%s_%s", #name, intra_##name##_names[dir] );\
        used_asm = 1;\
        memcpy( pbuf3, fdec, FDEC_STRIDE*20 * sizeof(pixel) );\
        memcpy( pbuf4, fdec, FDEC_STRIDE*20 * sizeof(pixel) );\
        for( int a = 0; a < (do_bench ? 64/sizeof(pixel) : 1); a += align )\
        {\
            call_c##bench( ip_c.name[dir], pbuf3+48+a, ##__VA_ARGS__ );\
            call_a##bench( ip_a.name[dir], pbuf4+48+a, ##__VA_ARGS__ );\
            if( memcmp( pbuf3, pbuf4, FDEC_STRIDE*20 * sizeof(pixel) ) )\
            {\
                fprintf( stderr, #name "[%d] : [FAILED]\n", dir );\
                ok = 0;\
                if( ip_c.name == (void *)ip_c.predict_8x8 )\
                {\
                    for( int k = -1; k < 16; k++ )\
                        printf( "%2x ", edge[16+k] );\
                    printf( "\n" );\
                }\
                for( int j = 0; j < h; j++ )\
                {\
                    if( ip_c.name == (void *)ip_c.predict_8x8 )\
                        printf( "%2x ", edge[14-j] );\
                    for( int k = 0; k < w; k++ )\
                        printf( "%2x ", pbuf4[48+k+j*FDEC_STRIDE] );\
                    printf( "\n" );\
                }\
                printf( "\n" );\
                for( int j = 0; j < h; j++ )\
                {\
                    if( ip_c.name == (void *)ip_c.predict_8x8 )\
                        printf( "   " );\
                    for( int k = 0; k < w; k++ )\
                        printf( "%2x ", pbuf3[48+k+j*FDEC_STRIDE] );\
                    printf( "\n" );\
                }\
                break;\
            }\
        }\
    }

    for( int i = 0; i < 12; i++ )
        INTRA_TEST( predict_4x4, i, 4, 4, 4, );
    for( int i = 0; i < 7; i++ )
        INTRA_TEST( predict_8x8c, i, 8, 8, 16, );
    for( int i = 0; i < 7; i++ )
        INTRA_TEST( predict_8x16c, i, 8, 16, 16, );
    for( int i = 0; i < 7; i++ )
        INTRA_TEST( predict_16x16, i, 16, 16, 16, );
    for( int i = 0; i < 12; i++ )
        INTRA_TEST( predict_8x8, i, 8, 8, 8, , edge );

    set_func_name("intra_predict_8x8_filter");
    if( ip_a.predict_8x8_filter != ip_ref.predict_8x8_filter )
    {
        used_asm = 1;
        for( int i = 0; i < 32; i++ )
        {
            if( !(i&7) || ((i&MB_TOPRIGHT) && !(i&MB_TOP)) )
                continue;
            int neighbor = (i&24)>>1;
            memset( edge, 0, 36*sizeof(pixel) );
            memset( edge2, 0, 36*sizeof(pixel) );
            call_c( ip_c.predict_8x8_filter, pbuf1+48, edge, neighbor, i&7 );
            call_a( ip_a.predict_8x8_filter, pbuf1+48, edge2, neighbor, i&7 );
            if( !(neighbor&MB_TOPLEFT) )
                edge[15] = edge2[15] = 0;
            if( memcmp( edge+7, edge2+7, (i&MB_TOPRIGHT ? 26 : i&MB_TOP ? 17 : 8) * sizeof(pixel) ) )
            {
                fprintf( stderr, "predict_8x8_filter : [FAILED] %d %d\n", (i&24)>>1, i&7);
                ok = 0;
            }
        }
    }
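
/* Planar prediction sums scaled edge pixels and is the intra mode most prone
 * to intermediate overflow, so each border segment is driven to 0 or to its
 * maximum to hit the extreme cases. */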
#define EXTREMAL_PLANE( w, h ) \
    { \
        int max[7]; \
        for( int j = 0; j < 7; j++ ) \
            max[j] = test ? rand()&PIXEL_MAX : PIXEL_MAX; \
        fdec[48-1-FDEC_STRIDE] = (i&1)*max[0]; \
        for( int j = 0; j < w/2; j++ ) \
            fdec[48+j-FDEC_STRIDE] = (!!(i&2))*max[1]; \
        for( int j = w/2; j < w-1; j++ ) \
            fdec[48+j-FDEC_STRIDE] = (!!(i&4))*max[2]; \
        fdec[48+(w-1)-FDEC_STRIDE] = (!!(i&8))*max[3]; \
        for( int j = 0; j < h/2; j++ ) \
            fdec[48+j*FDEC_STRIDE-1] = (!!(i&16))*max[4]; \
        for( int j = h/2; j < h-1; j++ ) \
            fdec[48+j*FDEC_STRIDE-1] = (!!(i&32))*max[5]; \
        fdec[48+(h-1)*FDEC_STRIDE-1] = (!!(i&64))*max[6]; \
    }
    /* Extremal test case for planar prediction. */
    for( int test = 0; test < 100 && ok; test++ )
        for( int i = 0; i < 128 && ok; i++ )
        {
            EXTREMAL_PLANE( 8, 8 );
            INTRA_TEST( predict_8x8c, I_PRED_CHROMA_P, 8, 8, 64, 1 );
            EXTREMAL_PLANE( 8, 16 );
            INTRA_TEST( predict_8x16c, I_PRED_CHROMA_P, 8, 16, 64, 1 );
            EXTREMAL_PLANE( 16, 16 );
            INTRA_TEST( predict_16x16, I_PRED_16x16_P, 16, 16, 64, 1 );
        }
    report( "intra pred :" );
    return ret;
}

#define DECL_CABAC(cpu) \
static void run_cabac_decision_##cpu( x264_t *h, uint8_t *dst )\
{\
    x264_cabac_t cb;\
    x264_cabac_context_init( h, &cb, SLICE_TYPE_P, 26, 0 );\
    x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
    for( int i = 0; i < 0x1000; i++ )\
        x264_cabac_encode_decision_##cpu( &cb, buf1[i]>>1, buf1[i]&1 );\
}\
static void run_cabac_bypass_##cpu( x264_t *h, uint8_t *dst )\
{\
    x264_cabac_t cb;\
    x264_cabac_context_init( h, &cb, SLICE_TYPE_P, 26, 0 );\
    x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
    for( int i = 0; i < 0x1000; i++ )\
        x264_cabac_encode_bypass_##cpu( &cb, buf1[i]&1 );\
}\
static void run_cabac_terminal_##cpu( x264_t *h, uint8_t *dst )\
{\
    x264_cabac_t cb;\
    x264_cabac_context_init( h, &cb, SLICE_TYPE_P, 26, 0 );\
    x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
    for( int i = 0; i < 0x1000; i++ )\
        x264_cabac_encode_terminal_##cpu( &cb );\
}
DECL_CABAC(c)
#if HAVE_MMX
DECL_CABAC(asm)
#elif defined(ARCH_AARCH64)
DECL_CABAC(asm)
#else
#define run_cabac_decision_asm run_cabac_decision_c
#define run_cabac_bypass_asm run_cabac_bypass_c
#define run_cabac_terminal_asm run_cabac_terminal_c
#endif

extern const uint8_t x264_count_cat_m1[14];
void x264_cabac_block_residual_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l );
void x264_cabac_block_residual_8x8_rd_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l );
void x264_cabac_block_residual_rd_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l );

static int check_cabac( int cpu_ref, int cpu_new )
{
    int ret = 0, ok = 1, used_asm = 0;
    x264_t h;
    h.sps->i_chroma_format_idc = 3;

    x264_bitstream_function_t bs_ref;
    x264_bitstream_function_t bs_a;
    x264_bitstream_init( cpu_ref, &bs_ref );
    x264_bitstream_init( cpu_new, &bs_a );
    x264_quant_init( &h, cpu_new, &h.quantf );
    h.quantf.coeff_last[DCT_CHROMA_DC] = h.quantf.coeff_last4;
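
/* Residual coding is checked against the C reference for every block category
 * and both frame/field (b_interlaced) variants, with coefficients drawn from a
 * rough magnitude distribution so all escape paths run. */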
#define CABAC_RESIDUAL(name, start, end, rd)\
{\
    if( bs_a.name##_internal && (bs_a.name##_internal != bs_ref.name##_internal || (cpu_new&X264_CPU_SSE2_IS_SLOW)) )\
    {\
        used_asm = 1;\
        set_func_name( #name );\
        for( int i = 0; i < 2; i++ )\
        {\
            for( intptr_t ctx_block_cat = start; ctx_block_cat <= end; ctx_block_cat++ )\
            {\
                for( int j = 0; j < 256; j++ )\
                {\
                    ALIGNED_ARRAY_N( dctcoef, dct, [2],[64] );\
                    uint8_t bitstream[2][1<<16];\
                    static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};\
                    int ac = ctx_ac[ctx_block_cat];\
                    int nz = 0;\
                    while( !nz )\
                    {\
                        for( int k = 0; k <= x264_count_cat_m1[ctx_block_cat]; k++ )\
                        {\
                            /* Very rough distribution that covers possible inputs */\
                            int rnd = rand();\
                            int coef = !(rnd&3);\
                            coef += !(rnd& 15) * (rand()&0x0006);\
                            coef += !(rnd& 63) * (rand()&0x0008);\
                            coef += !(rnd& 255) * (rand()&0x00F0);\
                            coef += !(rnd&1023) * (rand()&0x7F00);\
                            nz |= dct[0][ac+k] = dct[1][ac+k] = coef * ((rand()&1) ? 1 : -1);\
                        }\
                    }\
                    h.mb.b_interlaced = i;\
                    x264_cabac_t cb[2];\
                    x264_cabac_context_init( &h, &cb[0], SLICE_TYPE_P, 26, 0 );\
                    x264_cabac_context_init( &h, &cb[1], SLICE_TYPE_P, 26, 0 );\
                    x264_cabac_encode_init( &cb[0], bitstream[0], bitstream[0]+0xfff0 );\
                    x264_cabac_encode_init( &cb[1], bitstream[1], bitstream[1]+0xfff0 );\
                    cb[0].f8_bits_encoded = 0;\
                    cb[1].f8_bits_encoded = 0;\
                    if( !rd ) memcpy( bitstream[1], bitstream[0], 0x400 );\
                    call_c1( x264_##name##_c, &h, &cb[0], ctx_block_cat, dct[0]+ac );\
                    call_a1( bs_a.name##_internal, dct[1]+ac, i, ctx_block_cat, &cb[1] );\
                    ok = cb[0].f8_bits_encoded == cb[1].f8_bits_encoded && !memcmp(cb[0].state, cb[1].state, 1024);\
                    if( !rd ) ok |= !memcmp( bitstream[1], bitstream[0], 0x400 ) && !memcmp( &cb[1], &cb[0], offsetof(x264_cabac_t, p_start) );\
                    if( !ok )\
                    {\
                        fprintf( stderr, #name " : [FAILED] ctx_block_cat %d", (int)ctx_block_cat );\
                        if( rd && cb[0].f8_bits_encoded != cb[1].f8_bits_encoded )\
                            fprintf( stderr, " (%d != %d)", cb[0].f8_bits_encoded, cb[1].f8_bits_encoded );\
                        fprintf( stderr, "\n");\
                        goto name##fail;\
                    }\
                    if( (j&15) == 0 )\
                    {\
                        call_c2( x264_##name##_c, &h, &cb[0], ctx_block_cat, dct[0]+ac );\
                        call_a2( bs_a.name##_internal, dct[1]+ac, i, ctx_block_cat, &cb[1] );\
                    }\
                }\
            }\
        }\
    }\
}\
name##fail:

    CABAC_RESIDUAL( cabac_block_residual, 0, DCT_LUMA_8x8, 0 )
    report( "cabac residual:" );

    ok = 1; used_asm = 0;
    CABAC_RESIDUAL( cabac_block_residual_rd, 0, DCT_LUMA_8x8-1, 1 )
    CABAC_RESIDUAL( cabac_block_residual_8x8_rd, DCT_LUMA_8x8, DCT_LUMA_8x8, 1 )
    report( "cabac residual rd:" );

    if( cpu_ref || run_cabac_decision_c == run_cabac_decision_asm )
        return ret;
    ok = 1; used_asm = 0;
    x264_cabac_init( &h );

    set_func_name( "cabac_encode_decision" );
    memcpy( buf4, buf3, 0x1000 );
    call_c( run_cabac_decision_c, &h, buf3 );
    call_a( run_cabac_decision_asm, &h, buf4 );
    ok = !memcmp( buf3, buf4, 0x1000 );
    report( "cabac decision:" );

    set_func_name( "cabac_encode_bypass" );
    memcpy( buf4, buf3, 0x1000 );
    call_c( run_cabac_bypass_c, &h, buf3 );
    call_a( run_cabac_bypass_asm, &h, buf4 );
    ok = !memcmp( buf3, buf4, 0x1000 );
    report( "cabac bypass:" );

    set_func_name( "cabac_encode_terminal" );
    memcpy( buf4, buf3, 0x1000 );
    call_c( run_cabac_terminal_c, &h, buf3 );
    call_a( run_cabac_terminal_asm, &h, buf4 );
    ok = !memcmp( buf3, buf4, 0x1000 );
    report( "cabac terminal:" );

    return ret;
}

static int check_bitstream( int cpu_ref, int cpu_new )
{
    x264_bitstream_function_t bs_c;
    x264_bitstream_function_t bs_ref;
    x264_bitstream_function_t bs_a;

    int ret = 0, ok = 1, used_asm = 0;

    x264_bitstream_init( 0, &bs_c );
    x264_bitstream_init( cpu_ref, &bs_ref );
    x264_bitstream_init( cpu_new, &bs_a );
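    /* nal_escape inserts 0x03 emulation-prevention bytes so that no two zero
     * bytes are ever followed by a byte <= 3 in the NAL payload; the escaped
     * output can therefore be larger than the input. */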
    if( bs_a.nal_escape != bs_ref.nal_escape )
    {
        int size = 0x4000;
        uint8_t *input = malloc(size+100);
        uint8_t *output1 = malloc(size*2);
        uint8_t *output2 = malloc(size*2);
        used_asm = 1;
        set_func_name( "nal_escape" );
        for( int i = 0; i < 100; i++ )
        {
            /* Test corner-case sizes */
            int test_size = i < 10 ? i+1 : rand() & 0x3fff;
            /* Test 8 different probability distributions of zeros */
            for( int j = 0; j < test_size+32; j++ )
                input[j] = (rand()&((1 << ((i&7)+1)) - 1)) * rand();
            uint8_t *end_c = (uint8_t*)call_c1( bs_c.nal_escape, output1, input, input+test_size );
            uint8_t *end_a = (uint8_t*)call_a1( bs_a.nal_escape, output2, input, input+test_size );
            int size_c = end_c-output1;
            int size_a = end_a-output2;
            if( size_c != size_a || memcmp( output1, output2, size_c ) )
            {
                fprintf( stderr, "nal_escape : [FAILED] %d %d\n", size_c, size_a );
                ok = 0;
                break;
            }
        }
        for( int j = 0; j < size+32; j++ )
            input[j] = rand();
        call_c2( bs_c.nal_escape, output1, input, input+size );
        call_a2( bs_a.nal_escape, output2, input, input+size );
        free(input);
        free(output1);
        free(output2);
    }
    report( "nal escape:" );

    return ret;
}

static int check_all_funcs( int cpu_ref, int cpu_new )
{
    return check_pixel( cpu_ref, cpu_new )
         + check_dct( cpu_ref, cpu_new )
         + check_mc( cpu_ref, cpu_new )
         + check_intra( cpu_ref, cpu_new )
         + check_deblock( cpu_ref, cpu_new )
         + check_quant( cpu_ref, cpu_new )
         + check_cabac( cpu_ref, cpu_new )
         + check_bitstream( cpu_ref, cpu_new );
}

static int add_flags( int *cpu_ref, int *cpu_new, int flags, const char *name )
{
    *cpu_ref = *cpu_new;
    *cpu_new |= flags;
#if STACK_ALIGNMENT < 16
    *cpu_new |= X264_CPU_STACK_MOD4;
#endif
    if( *cpu_new & X264_CPU_SSE2_IS_FAST )
        *cpu_new &= ~X264_CPU_SSE2_IS_SLOW;
    if( !quiet )
        fprintf( stderr, "x264: %s\n", name );
    return check_all_funcs( *cpu_ref, *cpu_new );
}
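
/* Each add_flags() call promotes the previous flag set to the reference, so
 * every cpu level is validated incrementally against the level before it
 * rather than always against plain C. */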
static int check_all_flags( void )
{
    int ret = 0;
    int cpu0 = 0, cpu1 = 0;
    uint32_t cpu_detect = x264_cpu_detect();
#if HAVE_MMX
    if( cpu_detect & X264_CPU_MMX2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_MMX | X264_CPU_MMX2, "MMX" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "MMX Cache64" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
#if ARCH_X86
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_32, "MMX Cache32" );
        cpu1 &= ~X264_CPU_CACHELINE_32;
#endif
        if( cpu_detect & X264_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "MMX LZCNT" );
            cpu1 &= ~X264_CPU_LZCNT;
        }
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_CTZ, "MMX SlowCTZ" );
        cpu1 &= ~X264_CPU_SLOW_CTZ;
    }
    if( cpu_detect & X264_CPU_SSE )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE, "SSE" );
    if( cpu_detect & X264_CPU_SSE2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_SHUFFLE, "SSE2 SlowShuffle" );
        cpu1 &= ~X264_CPU_SLOW_SHUFFLE;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_CTZ, "SSE2 SlowCTZ" );
        cpu1 &= ~X264_CPU_SLOW_CTZ;
        if( cpu_detect & X264_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "SSE2 LZCNT" );
            cpu1 &= ~X264_CPU_LZCNT;
        }
    }
    if( cpu_detect & X264_CPU_SSE3 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE3 | X264_CPU_CACHELINE_64, "SSE3" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
    }
    if( cpu_detect & X264_CPU_SSSE3 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_SHUFFLE, "SSSE3 SlowShuffle" );
        cpu1 &= ~X264_CPU_SLOW_SHUFFLE;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_CTZ, "SSSE3 SlowCTZ" );
        cpu1 &= ~X264_CPU_SLOW_CTZ;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_ATOM, "SSSE3 SlowAtom" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64 SlowAtom" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
        cpu1 &= ~X264_CPU_SLOW_ATOM;
        if( cpu_detect & X264_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "SSSE3 LZCNT" );
            cpu1 &= ~X264_CPU_LZCNT;
        }
    }
    if( cpu_detect & X264_CPU_SSE4 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE4, "SSE4" );
    if( cpu_detect & X264_CPU_SSE42 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE42, "SSE4.2" );
    if( cpu_detect & X264_CPU_AVX )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_AVX, "AVX" );
    if( cpu_detect & X264_CPU_XOP )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_XOP, "XOP" );
    if( cpu_detect & X264_CPU_FMA4 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA4, "FMA4" );
        cpu1 &= ~X264_CPU_FMA4;
    }
    if( cpu_detect & X264_CPU_FMA3 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA3, "FMA3" );
        cpu1 &= ~X264_CPU_FMA3;
    }
    if( cpu_detect & X264_CPU_AVX2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA3 | X264_CPU_AVX2, "AVX2" );
        if( cpu_detect & X264_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "AVX2 LZCNT" );
            cpu1 &= ~X264_CPU_LZCNT;
        }
    }
    if( cpu_detect & X264_CPU_BMI1 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_BMI1, "BMI1" );
        cpu1 &= ~X264_CPU_BMI1;
    }
    if( cpu_detect & X264_CPU_BMI2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_BMI1|X264_CPU_BMI2, "BMI2" );
        cpu1 &= ~(X264_CPU_BMI1|X264_CPU_BMI2);
    }
#elif ARCH_PPC
    if( cpu_detect & X264_CPU_ALTIVEC )
    {
        fprintf( stderr, "x264: ALTIVEC against C\n" );
        ret = check_all_funcs( 0, X264_CPU_ALTIVEC );
    }
#elif ARCH_ARM
    if( cpu_detect & X264_CPU_NEON )
        x264_checkasm_call = x264_checkasm_call_neon;
    if( cpu_detect & X264_CPU_ARMV6 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_ARMV6, "ARMv6" );
    if( cpu_detect & X264_CPU_NEON )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_NEON, "NEON" );
    if( cpu_detect & X264_CPU_FAST_NEON_MRC )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_FAST_NEON_MRC, "Fast NEON MRC" );
#elif ARCH_AARCH64
    if( cpu_detect & X264_CPU_ARMV8 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_ARMV8, "ARMv8" );
    if( cpu_detect & X264_CPU_NEON )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_NEON, "NEON" );
#elif ARCH_MIPS
    if( cpu_detect & X264_CPU_MSA )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_MSA, "MSA" );
#endif
    return ret;
}

int main(int argc, char *argv[])
{
    int ret = 0;

    if( argc > 1 && !strncmp( argv[1], "--bench", 7 ) )
    {
#if !ARCH_X86 && !ARCH_X86_64 && !ARCH_PPC && !ARCH_ARM && !ARCH_AARCH64 && !ARCH_MIPS
        fprintf( stderr, "no --bench for your cpu until you port rdtsc\n" );
        return 1;
#endif
        do_bench = 1;
        if( argv[1][7] == '=' )
        {
            bench_pattern = argv[1]+8;
            bench_pattern_len = strlen(bench_pattern);
        }
        argc--;
        argv++;
    }

    int seed = ( argc > 1 ) ? atoi(argv[1]) : x264_mdate();
    fprintf( stderr, "x264: using random seed %d\n", seed );
    srand( seed );

    buf1 = x264_malloc( 0x1e00 + 0x2000*sizeof(pixel) + 32*BENCH_ALIGNS );
    pbuf1 = x264_malloc( 0x1e00*sizeof(pixel) + 32*BENCH_ALIGNS );
    if( !buf1 || !pbuf1 )
    {
        fprintf( stderr, "malloc failed, unable to initiate tests!\n" );
        return -1;
    }
#define INIT_POINTER_OFFSETS\
    buf2 = buf1 + 0xf00;\
    buf3 = buf2 + 0xf00;\
    buf4 = buf3 + 0x1000*sizeof(pixel);\
    pbuf2 = pbuf1 + 0xf00;\
    pbuf3 = (pixel*)buf3;\
    pbuf4 = (pixel*)buf4;
    INIT_POINTER_OFFSETS;
    for( int i = 0; i < 0x1e00; i++ )
    {
        buf1[i] = rand() & 0xFF;
        pbuf1[i] = rand() & PIXEL_MAX;
    }
    memset( buf1+0x1e00, 0, 0x2000*sizeof(pixel) );

    /* 32-byte alignment is guaranteed whenever it's useful, but some functions also vary in speed depending on %64 */
    if( do_bench )
        for( int i = 0; i < BENCH_ALIGNS && !ret; i++ )
        {
            INIT_POINTER_OFFSETS;
            ret |= x264_stack_pagealign( check_all_flags, i*32 );
            buf1 += 32;
            pbuf1 += 32;
            quiet = 1;
            fprintf( stderr, "%d/%d\r", i+1, BENCH_ALIGNS );
        }
    else
        ret = x264_stack_pagealign( check_all_flags, 0 );

    if( ret )
    {
        fprintf( stderr, "x264: at least one test has failed. Go and fix that Right Now!\n" );
        return -1;
    }
    fprintf( stderr, "x264: All tests passed Yeah :)\n" );
    if( do_bench )
        print_bench();
    return 0;
}