/*****************************************************************************
 * predict.c: intra prediction
 *****************************************************************************
 * Copyright (C) 2003-2016 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

/* The predict4x4 functions are inspired by the ffmpeg H.264 decoder. */


#include "common.h"

#if HAVE_MMX
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/predict.h"
#endif
#if ARCH_ARM
#   include "arm/predict.h"
#endif
#if ARCH_AARCH64
#   include "aarch64/predict.h"
#endif
#if ARCH_MIPS
#   include "mips/predict.h"
#endif

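/* The arch-specific headers above declare optimized versions of these
 * predictors; the x264_predict_*_init() functions at the end of this file
 * install them in the function tables when the matching CPU flags are set. */
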
/****************************************************************************
 * 16x16 prediction for intra luma block
 ****************************************************************************/

#define PREDICT_16x16_DC(v)\
    for( int i = 0; i < 16; i++ )\
    {\
        MPIXEL_X4( src+ 0 ) = v;\
        MPIXEL_X4( src+ 4 ) = v;\
        MPIXEL_X4( src+ 8 ) = v;\
        MPIXEL_X4( src+12 ) = v;\
        src += FDEC_STRIDE;\
    }
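
/* PIXEL_SPLAT_X4 replicates one pixel value across a pixel4 word and
 * MPIXEL_X4 is a 4-pixel aligned load/store (both from common.h), so each
 * iteration above fills a 16-pixel row with four word-sized stores. */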

void x264_predict_16x16_dc_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * FDEC_STRIDE];
        dc += src[i - FDEC_STRIDE];
    }
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 16 ) >> 5 );

    PREDICT_16x16_DC( dcsplat );
}
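
/* 16 top + 16 left neighbors = 32 samples, so ( dc + 16 ) >> 5 is their
 * rounded average; the _left/_top variants below average 16 samples and
 * therefore round with ( dc + 8 ) >> 4. */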
static void x264_predict_16x16_dc_left_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
        dc += src[-1 + i * FDEC_STRIDE];
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 8 ) >> 4 );

    PREDICT_16x16_DC( dcsplat );
}
static void x264_predict_16x16_dc_top_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
        dc += src[i - FDEC_STRIDE];
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 8 ) >> 4 );

    PREDICT_16x16_DC( dcsplat );
}
static void x264_predict_16x16_dc_128_c( pixel *src )
{
    PREDICT_16x16_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}
void x264_predict_16x16_h_c( pixel *src )
{
    for( int i = 0; i < 16; i++ )
    {
        const pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+ 0 ) = v;
        MPIXEL_X4( src+ 4 ) = v;
        MPIXEL_X4( src+ 8 ) = v;
        MPIXEL_X4( src+12 ) = v;
        src += FDEC_STRIDE;
    }
}
void x264_predict_16x16_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( &src[ 0-FDEC_STRIDE] );
    pixel4 v1 = MPIXEL_X4( &src[ 4-FDEC_STRIDE] );
    pixel4 v2 = MPIXEL_X4( &src[ 8-FDEC_STRIDE] );
    pixel4 v3 = MPIXEL_X4( &src[12-FDEC_STRIDE] );

    for( int i = 0; i < 16; i++ )
    {
        MPIXEL_X4( src+ 0 ) = v0;
        MPIXEL_X4( src+ 4 ) = v1;
        MPIXEL_X4( src+ 8 ) = v2;
        MPIXEL_X4( src+12 ) = v3;
        src += FDEC_STRIDE;
    }
}
void x264_predict_16x16_p_c( pixel *src )
{
    int H = 0, V = 0;

    /* calculate H and V */
    for( int i = 0; i <= 7; i++ )
    {
        H += ( i + 1 ) * ( src[ 8 + i - FDEC_STRIDE ] - src[6 -i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 + (8+i)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[15 - FDEC_STRIDE] );
    int b = ( 5 * H + 32 ) >> 6;
    int c = ( 5 * V + 32 ) >> 6;

    int i00 = a - b * 7 - c * 7 + 16;

    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 16; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}
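
/* Plane prediction per the H.264 spec (8.3.3.4): H and V are weighted sums of
 * horizontal/vertical neighbor differences, a anchors the plane at the
 * bottom-right corner, and b/c are the per-pixel gradients. Each sample is
 * clip( ( a + b*(x-7) + c*(y-7) + 16 ) >> 5 ), computed incrementally above
 * by stepping pix by b across a row and i00 by c down the block. */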


/****************************************************************************
 * 8x8 prediction for intra chroma block (4:2:0)
 ****************************************************************************/

static void x264_predict_8x8c_dc_128_c( pixel *src )
{
    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+0 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        MPIXEL_X4( src+4 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        src += FDEC_STRIDE;
    }
}
static void x264_predict_8x8c_dc_left_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int y = 0; y < 4; y++ )
    {
        dc0 += src[y * FDEC_STRIDE - 1];
        dc1 += src[(y+4) * FDEC_STRIDE - 1];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc0splat;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc1splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}
static void x264_predict_8x8c_dc_top_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int x = 0; x < 4; x++ )
    {
        dc0 += src[x - FDEC_STRIDE];
        dc1 += src[x + 4 - FDEC_STRIDE];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}
void x264_predict_8x8c_dc_c( pixel *src )
{
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0;

    /*
          s0 s1
       s2
       s3
    */
    for( int i = 0; i < 4; i++ )
    {
        s0 += src[i - FDEC_STRIDE];
        s1 += src[i + 4 - FDEC_STRIDE];
        s2 += src[-1 + i * FDEC_STRIDE];
        s3 += src[-1 + (i+4)*FDEC_STRIDE];
    }
    /*
       dc0 dc1
       dc2 dc3
    */
    pixel4 dc0 = PIXEL_SPLAT_X4( ( s0 + s2 + 4 ) >> 3 );
    pixel4 dc1 = PIXEL_SPLAT_X4( ( s1 + 2 ) >> 2 );
    pixel4 dc2 = PIXEL_SPLAT_X4( ( s3 + 2 ) >> 2 );
    pixel4 dc3 = PIXEL_SPLAT_X4( ( s1 + s3 + 4 ) >> 3 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0;
        MPIXEL_X4( src+4 ) = dc1;
        src += FDEC_STRIDE;
    }

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc2;
        MPIXEL_X4( src+4 ) = dc3;
        src += FDEC_STRIDE;
    }
}
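
/* Per the spec's chroma DC rules: the top-left 4x4 averages both its top and
 * left neighbors (s0+s2), the top-right uses only its top neighbors (s1),
 * the bottom-left only its left neighbors (s3), and the bottom-right
 * averages s1+s3. */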
void x264_predict_8x8c_h_c( pixel *src )
{
    for( int i = 0; i < 8; i++ )
    {
        pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+0 ) = v;
        MPIXEL_X4( src+4 ) = v;
        src += FDEC_STRIDE;
    }
}
void x264_predict_8x8c_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( src+0-FDEC_STRIDE );
    pixel4 v1 = MPIXEL_X4( src+4-FDEC_STRIDE );

    for( int i = 0; i < 8; i++ )
    {
        MPIXEL_X4( src+0 ) = v0;
        MPIXEL_X4( src+4 ) = v1;
        src += FDEC_STRIDE;
    }
}
void x264_predict_8x8c_p_c( pixel *src )
{
    int H = 0, V = 0;

    for( int i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 17 * V + 16 ) >> 5;
    int i00 = a -3*b -3*c + 16;

    for( int y = 0; y < 8; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 8; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}
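
/* Same plane construction as 16x16, scaled to 8x8: samples follow
 * clip( ( a + b*(x-3) + c*(y-3) + 16 ) >> 5 ), with the steeper
 * ( 17*H + 16 ) >> 5 gradient scale the spec prescribes for 8-sample-wide
 * blocks instead of the 16x16 block's ( 5*H + 32 ) >> 6. */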

/****************************************************************************
 * 8x16 prediction for intra chroma block (4:2:2)
 ****************************************************************************/

static void x264_predict_8x16c_dc_128_c( pixel *src )
{
    for( int y = 0; y < 16; y++ )
    {
        MPIXEL_X4( src+0 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        MPIXEL_X4( src+4 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        src += FDEC_STRIDE;
    }
}
static void x264_predict_8x16c_dc_left_c( pixel *src )
{
    for( int i = 0; i < 4; i++ )
    {
        int dc = 0;

        for( int y = 0; y < 4; y++ )
            dc += src[y*FDEC_STRIDE - 1];

        pixel4 dcsplat = PIXEL_SPLAT_X4( (dc + 2) >> 2 );

        for( int y = 0; y < 4; y++ )
        {
            MPIXEL_X4( src+0 ) = dcsplat;
            MPIXEL_X4( src+4 ) = dcsplat;
            src += FDEC_STRIDE;
        }
    }
}
static void x264_predict_8x16c_dc_top_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int x = 0; x < 4; x++ )
    {
        dc0 += src[x - FDEC_STRIDE];
        dc1 += src[x + 4 - FDEC_STRIDE];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 16; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}
void x264_predict_8x16c_dc_c( pixel *src )
{
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0;

    /*
          s0 s1
       s2
       s3
       s4
       s5
    */
    for( int i = 0; i < 4; i++ )
    {
        s0 += src[i+0 - FDEC_STRIDE];
        s1 += src[i+4 - FDEC_STRIDE];
        s2 += src[-1 + (i+0)  * FDEC_STRIDE];
        s3 += src[-1 + (i+4)  * FDEC_STRIDE];
        s4 += src[-1 + (i+8)  * FDEC_STRIDE];
        s5 += src[-1 + (i+12) * FDEC_STRIDE];
    }
    /*
       dc0 dc1
       dc2 dc3
       dc4 dc5
       dc6 dc7
    */
    pixel4 dc0 = PIXEL_SPLAT_X4( ( s0 + s2 + 4 ) >> 3 );
    pixel4 dc1 = PIXEL_SPLAT_X4( ( s1 + 2 ) >> 2 );
    pixel4 dc2 = PIXEL_SPLAT_X4( ( s3 + 2 ) >> 2 );
    pixel4 dc3 = PIXEL_SPLAT_X4( ( s1 + s3 + 4 ) >> 3 );
    pixel4 dc4 = PIXEL_SPLAT_X4( ( s4 + 2 ) >> 2 );
    pixel4 dc5 = PIXEL_SPLAT_X4( ( s1 + s4 + 4 ) >> 3 );
    pixel4 dc6 = PIXEL_SPLAT_X4( ( s5 + 2 ) >> 2 );
    pixel4 dc7 = PIXEL_SPLAT_X4( ( s1 + s5 + 4 ) >> 3 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0;
        MPIXEL_X4( src+4 ) = dc1;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc2;
        MPIXEL_X4( src+4 ) = dc3;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc4;
        MPIXEL_X4( src+4 ) = dc5;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc6;
        MPIXEL_X4( src+4 ) = dc7;
        src += FDEC_STRIDE;
    }
}
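
/* 4:2:2 chroma DC: each 4-row band has its own left sum (s2..s5). In the
 * right column the first band uses the top sum s1 alone, while the lower
 * bands average s1 with their band's left sum (dc5 = s1+s4, dc7 = s1+s5),
 * mirroring the spec's per-4x4 chroma DC availability rules. */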
void x264_predict_8x16c_h_c( pixel *src )
{
    for( int i = 0; i < 16; i++ )
    {
        pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+0 ) = v;
        MPIXEL_X4( src+4 ) = v;
        src += FDEC_STRIDE;
    }
}
void x264_predict_8x16c_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( src+0-FDEC_STRIDE );
    pixel4 v1 = MPIXEL_X4( src+4-FDEC_STRIDE );

    for( int i = 0; i < 16; i++ )
    {
        MPIXEL_X4( src+0 ) = v0;
        MPIXEL_X4( src+4 ) = v1;
        src += FDEC_STRIDE;
    }
}
void x264_predict_8x16c_p_c( pixel *src )
{
    int H = 0;
    int V = 0;

    for( int i = 0; i < 4; i++ )
        H += ( i + 1 ) * ( src[4 + i - FDEC_STRIDE] - src[2 - i - FDEC_STRIDE] );
    for( int i = 0; i < 8; i++ )
        V += ( i + 1 ) * ( src[-1 + (i+8)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );

    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 5 * V + 32 ) >> 6;
    int i00 = a -3*b -7*c + 16;

    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 8; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}
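
/* 8x16 chroma plane mixes the two scales above: H comes from the 8-wide top
 * row (17/32 scale, as in 8x8c) while V spans the 16-tall left column
 * (5/64 scale, as in 16x16), and i00 offsets by -3*b horizontally but -7*c
 * vertically to center the plane on the taller block. */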

/****************************************************************************
 * 4x4 prediction for intra luma block
 ****************************************************************************/

#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
#define SRC_X4(x,y) MPIXEL_X4( &SRC(x,y) )
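
/* SRC(x,y) addresses the block's pixels relative to its top-left corner in
 * the decoded-frame cache (rows spaced FDEC_STRIDE apart); negative x or y
 * reach the already-reconstructed left/top neighbors used for prediction. */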

#define PREDICT_4x4_DC(v)\
    SRC_X4(0,0) = SRC_X4(0,1) = SRC_X4(0,2) = SRC_X4(0,3) = v;

static void x264_predict_4x4_dc_128_c( pixel *src )
{
    PREDICT_4x4_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}
static void x264_predict_4x4_dc_left_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(-1,0) + SRC(-1,1) + SRC(-1,2) + SRC(-1,3) + 2) >> 2 );
    PREDICT_4x4_DC( dc );
}
static void x264_predict_4x4_dc_top_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(0,-1) + SRC(1,-1) + SRC(2,-1) + SRC(3,-1) + 2) >> 2 );
    PREDICT_4x4_DC( dc );
}
void x264_predict_4x4_dc_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(-1,0) + SRC(-1,1) + SRC(-1,2) + SRC(-1,3) +
                                 SRC(0,-1) + SRC(1,-1) + SRC(2,-1) + SRC(3,-1) + 4) >> 3 );
    PREDICT_4x4_DC( dc );
}
void x264_predict_4x4_h_c( pixel *src )
{
    SRC_X4(0,0) = PIXEL_SPLAT_X4( SRC(-1,0) );
    SRC_X4(0,1) = PIXEL_SPLAT_X4( SRC(-1,1) );
    SRC_X4(0,2) = PIXEL_SPLAT_X4( SRC(-1,2) );
    SRC_X4(0,3) = PIXEL_SPLAT_X4( SRC(-1,3) );
}
void x264_predict_4x4_v_c( pixel *src )
{
    PREDICT_4x4_DC(SRC_X4(0,-1));
}

#define PREDICT_4x4_LOAD_LEFT\
    int l0 = SRC(-1,0);\
    int l1 = SRC(-1,1);\
    int l2 = SRC(-1,2);\
    UNUSED int l3 = SRC(-1,3);

#define PREDICT_4x4_LOAD_TOP\
    int t0 = SRC(0,-1);\
    int t1 = SRC(1,-1);\
    int t2 = SRC(2,-1);\
    UNUSED int t3 = SRC(3,-1);

#define PREDICT_4x4_LOAD_TOP_RIGHT\
    int t4 = SRC(4,-1);\
    int t5 = SRC(5,-1);\
    int t6 = SRC(6,-1);\
    UNUSED int t7 = SRC(7,-1);

#define F1(a,b)   (((a)+(b)+1)>>1)
#define F2(a,b,c) (((a)+2*(b)+(c)+2)>>2)
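
/* F1 is the 2-tap [1 1]/2 rounded average and F2 the 3-tap [1 2 1]/4 filter;
 * the directional 4x4 (and 8x8) modes below interpolate every predicted
 * sample from the neighbor pixels with these two filters. */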

static void x264_predict_4x4_ddl_c( pixel *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    SRC(0,0)= F2(t0,t1,t2);
    SRC(1,0)=SRC(0,1)= F2(t1,t2,t3);
    SRC(2,0)=SRC(1,1)=SRC(0,2)= F2(t2,t3,t4);
    SRC(3,0)=SRC(2,1)=SRC(1,2)=SRC(0,3)= F2(t3,t4,t5);
    SRC(3,1)=SRC(2,2)=SRC(1,3)= F2(t4,t5,t6);
    SRC(3,2)=SRC(2,3)= F2(t5,t6,t7);
    SRC(3,3)= F2(t6,t7,t7);
}
static void x264_predict_4x4_ddr_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    SRC(3,0)= F2(t3,t2,t1);
    SRC(2,0)=SRC(3,1)= F2(t2,t1,t0);
    SRC(1,0)=SRC(2,1)=SRC(3,2)= F2(t1,t0,lt);
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)= F2(t0,lt,l0);
    SRC(0,1)=SRC(1,2)=SRC(2,3)= F2(lt,l0,l1);
    SRC(0,2)=SRC(1,3)= F2(l0,l1,l2);
    SRC(0,3)= F2(l1,l2,l3);
}

static void x264_predict_4x4_vr_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    SRC(0,3)= F2(l2,l1,l0);
    SRC(0,2)= F2(l1,l0,lt);
    SRC(0,1)=SRC(1,3)= F2(l0,lt,t0);
    SRC(0,0)=SRC(1,2)= F1(lt,t0);
    SRC(1,1)=SRC(2,3)= F2(lt,t0,t1);
    SRC(1,0)=SRC(2,2)= F1(t0,t1);
    SRC(2,1)=SRC(3,3)= F2(t0,t1,t2);
    SRC(2,0)=SRC(3,2)= F1(t1,t2);
    SRC(3,1)= F2(t1,t2,t3);
    SRC(3,0)= F1(t2,t3);
}

static void x264_predict_4x4_hd_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    SRC(0,3)= F1(l2,l3);
    SRC(1,3)= F2(l1,l2,l3);
    SRC(0,2)=SRC(2,3)= F1(l1,l2);
    SRC(1,2)=SRC(3,3)= F2(l0,l1,l2);
    SRC(0,1)=SRC(2,2)= F1(l0,l1);
    SRC(1,1)=SRC(3,2)= F2(lt,l0,l1);
    SRC(0,0)=SRC(2,1)= F1(lt,l0);
    SRC(1,0)=SRC(3,1)= F2(t0,lt,l0);
    SRC(2,0)= F2(t1,t0,lt);
    SRC(3,0)= F2(t2,t1,t0);
}

static void x264_predict_4x4_vl_c( pixel *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    SRC(0,0)= F1(t0,t1);
    SRC(0,1)= F2(t0,t1,t2);
    SRC(1,0)=SRC(0,2)= F1(t1,t2);
    SRC(1,1)=SRC(0,3)= F2(t1,t2,t3);
    SRC(2,0)=SRC(1,2)= F1(t2,t3);
    SRC(2,1)=SRC(1,3)= F2(t2,t3,t4);
    SRC(3,0)=SRC(2,2)= F1(t3,t4);
    SRC(3,1)=SRC(2,3)= F2(t3,t4,t5);
    SRC(3,2)= F1(t4,t5);
    SRC(3,3)= F2(t4,t5,t6);
}

static void x264_predict_4x4_hu_c( pixel *src )
{
    PREDICT_4x4_LOAD_LEFT
    SRC(0,0)= F1(l0,l1);
    SRC(1,0)= F2(l0,l1,l2);
    SRC(2,0)=SRC(0,1)= F1(l1,l2);
    SRC(3,0)=SRC(1,1)= F2(l1,l2,l3);
    SRC(2,1)=SRC(0,2)= F1(l2,l3);
    SRC(3,1)=SRC(1,2)= F2(l2,l3,l3);
    SRC(3,2)=SRC(1,3)=SRC(0,3)=
    SRC(2,2)=SRC(2,3)=SRC(3,3)= l3;
}
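
/* The six functions above implement the directional Intra_4x4 modes of the
 * spec: ddl/ddr = diagonal down-left/right, vr/vl = vertical right/left,
 * hd/hu = horizontal down/up (prediction modes 3..8). */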

/****************************************************************************
 * 8x8 prediction for intra luma block
 ****************************************************************************/

#define PL(y) \
    edge[14-y] = F2(SRC(-1,y-1), SRC(-1,y), SRC(-1,y+1));
#define PT(x) \
    edge[16+x] = F2(SRC(x-1,-1), SRC(x,-1), SRC(x+1,-1));

static void x264_predict_8x8_filter_c( pixel *src, pixel edge[36], int i_neighbor, int i_filters )
{
    /* edge[7..14] = l7..l0
     * edge[15] = lt
     * edge[16..31] = t0 .. t15
     * edge[32] = t15 */

    int have_lt = i_neighbor & MB_TOPLEFT;
    if( i_filters & MB_LEFT )
    {
        edge[15] = (SRC(0,-1) + 2*SRC(-1,-1) + SRC(-1,0) + 2) >> 2;
        edge[14] = ((have_lt ? SRC(-1,-1) : SRC(-1,0))
                 + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2;
        PL(1) PL(2) PL(3) PL(4) PL(5) PL(6)
        edge[6] =
        edge[7] = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;
    }

    if( i_filters & MB_TOP )
    {
        int have_tr = i_neighbor & MB_TOPRIGHT;
        edge[16] = ((have_lt ? SRC(-1,-1) : SRC(0,-1))
                 + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2;
        PT(1) PT(2) PT(3) PT(4) PT(5) PT(6)
        edge[23] = (SRC(6,-1) + 2*SRC(7,-1)
                 + (have_tr ? SRC(8,-1) : SRC(7,-1)) + 2) >> 2;

        if( i_filters & MB_TOPRIGHT )
        {
            if( have_tr )
            {
                PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14)
                edge[31] =
                edge[32] = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2;
            }
            else
            {
                MPIXEL_X4( edge+24 ) = PIXEL_SPLAT_X4( SRC(7,-1) );
                MPIXEL_X4( edge+28 ) = PIXEL_SPLAT_X4( SRC(7,-1) );
                edge[32] = SRC(7,-1);
            }
        }
    }
}
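
/* 8x8 luma prediction works on a pre-filtered copy of the neighbors: per the
 * spec, the reference samples are low-pass filtered once (the F2-style
 * kernels above) and edge[] caches the result so every mode can reuse it.
 * i_neighbor says which neighbors exist; i_filters selects which sides to
 * prepare. */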

#undef PL
#undef PT

#define PL(y) \
    UNUSED int l##y = edge[14-y];
#define PT(x) \
    UNUSED int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)
#define PREDICT_8x8_LOAD_TOPRIGHT \
    PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14) PT(15)

#define PREDICT_8x8_DC(v) \
    for( int y = 0; y < 8; y++ ) { \
        MPIXEL_X4( src+0 ) = v; \
        MPIXEL_X4( src+4 ) = v; \
        src += FDEC_STRIDE; \
    }

static void x264_predict_8x8_dc_128_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}
static void x264_predict_8x8_dc_left_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    pixel4 dc = PIXEL_SPLAT_X4( (l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3 );
    PREDICT_8x8_DC( dc );
}
static void x264_predict_8x8_dc_top_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    pixel4 dc = PIXEL_SPLAT_X4( (t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3 );
    PREDICT_8x8_DC( dc );
}
void x264_predict_8x8_dc_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOP
    pixel4 dc = PIXEL_SPLAT_X4( (l0+l1+l2+l3+l4+l5+l6+l7+t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4 );
    PREDICT_8x8_DC( dc );
}
void x264_predict_8x8_h_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
#define ROW(y) MPIXEL_X4( src+y*FDEC_STRIDE+0 ) =\
               MPIXEL_X4( src+y*FDEC_STRIDE+4 ) = PIXEL_SPLAT_X4( l##y );
    ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}
void x264_predict_8x8_v_c( pixel *src, pixel edge[36] )
{
    pixel4 top[2] = { MPIXEL_X4( edge+16 ),
                      MPIXEL_X4( edge+20 ) };
    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+y*FDEC_STRIDE+0 ) = top[0];
        MPIXEL_X4( src+y*FDEC_STRIDE+4 ) = top[1];
    }
}
static void x264_predict_8x8_ddl_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_TOPRIGHT
    SRC(0,0)= F2(t0,t1,t2);
    SRC(0,1)=SRC(1,0)= F2(t1,t2,t3);
    SRC(0,2)=SRC(1,1)=SRC(2,0)= F2(t2,t3,t4);
    SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= F2(t3,t4,t5);
    SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= F2(t4,t5,t6);
    SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= F2(t5,t6,t7);
    SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= F2(t6,t7,t8);
    SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= F2(t7,t8,t9);
    SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= F2(t8,t9,t10);
    SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= F2(t9,t10,t11);
    SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= F2(t10,t11,t12);
    SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= F2(t11,t12,t13);
    SRC(5,7)=SRC(6,6)=SRC(7,5)= F2(t12,t13,t14);
    SRC(6,7)=SRC(7,6)= F2(t13,t14,t15);
    SRC(7,7)= F2(t14,t15,t15);
}
static void x264_predict_8x8_ddr_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT
    SRC(0,7)= F2(l7,l6,l5);
    SRC(0,6)=SRC(1,7)= F2(l6,l5,l4);
    SRC(0,5)=SRC(1,6)=SRC(2,7)= F2(l5,l4,l3);
    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= F2(l4,l3,l2);
    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= F2(l3,l2,l1);
    SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= F2(l2,l1,l0);
    SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= F2(l1,l0,lt);
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= F2(l0,lt,t0);
    SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= F2(lt,t0,t1);
    SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= F2(t0,t1,t2);
    SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= F2(t1,t2,t3);
    SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= F2(t2,t3,t4);
    SRC(5,0)=SRC(6,1)=SRC(7,2)= F2(t3,t4,t5);
    SRC(6,0)=SRC(7,1)= F2(t4,t5,t6);
    SRC(7,0)= F2(t5,t6,t7);
}
static void x264_predict_8x8_vr_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT
    SRC(0,6)= F2(l5,l4,l3);
    SRC(0,7)= F2(l6,l5,l4);
    SRC(0,4)=SRC(1,6)= F2(l3,l2,l1);
    SRC(0,5)=SRC(1,7)= F2(l4,l3,l2);
    SRC(0,2)=SRC(1,4)=SRC(2,6)= F2(l1,l0,lt);
    SRC(0,3)=SRC(1,5)=SRC(2,7)= F2(l2,l1,l0);
    SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= F2(l0,lt,t0);
    SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= F1(lt,t0);
    SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= F2(lt,t0,t1);
    SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= F1(t0,t1);
    SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= F2(t0,t1,t2);
    SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= F1(t1,t2);
    SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= F2(t1,t2,t3);
    SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= F1(t2,t3);
    SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= F2(t2,t3,t4);
    SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= F1(t3,t4);
    SRC(5,1)=SRC(6,3)=SRC(7,5)= F2(t3,t4,t5);
    SRC(5,0)=SRC(6,2)=SRC(7,4)= F1(t4,t5);
    SRC(6,1)=SRC(7,3)= F2(t4,t5,t6);
    SRC(6,0)=SRC(7,2)= F1(t5,t6);
    SRC(7,1)= F2(t5,t6,t7);
    SRC(7,0)= F1(t6,t7);
}
static void x264_predict_8x8_hd_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT
    int p1 = pack_pixel_1to2(F1(l6,l7), F2(l5,l6,l7));
    int p2 = pack_pixel_1to2(F1(l5,l6), F2(l4,l5,l6));
    int p3 = pack_pixel_1to2(F1(l4,l5), F2(l3,l4,l5));
    int p4 = pack_pixel_1to2(F1(l3,l4), F2(l2,l3,l4));
    int p5 = pack_pixel_1to2(F1(l2,l3), F2(l1,l2,l3));
    int p6 = pack_pixel_1to2(F1(l1,l2), F2(l0,l1,l2));
    int p7 = pack_pixel_1to2(F1(l0,l1), F2(lt,l0,l1));
    int p8 = pack_pixel_1to2(F1(lt,l0), F2(l0,lt,t0));
    int p9 = pack_pixel_1to2(F2(t1,t0,lt), F2(t2,t1,t0));
    int p10 = pack_pixel_1to2(F2(t3,t2,t1), F2(t4,t3,t2));
    int p11 = pack_pixel_1to2(F2(t5,t4,t3), F2(t6,t5,t4));
    SRC_X4(0,7)= pack_pixel_2to4(p1,p2);
    SRC_X4(0,6)= pack_pixel_2to4(p2,p3);
    SRC_X4(4,7)=SRC_X4(0,5)= pack_pixel_2to4(p3,p4);
    SRC_X4(4,6)=SRC_X4(0,4)= pack_pixel_2to4(p4,p5);
    SRC_X4(4,5)=SRC_X4(0,3)= pack_pixel_2to4(p5,p6);
    SRC_X4(4,4)=SRC_X4(0,2)= pack_pixel_2to4(p6,p7);
    SRC_X4(4,3)=SRC_X4(0,1)= pack_pixel_2to4(p7,p8);
    SRC_X4(4,2)=SRC_X4(0,0)= pack_pixel_2to4(p8,p9);
    SRC_X4(4,1)= pack_pixel_2to4(p9,p10);
    SRC_X4(4,0)= pack_pixel_2to4(p10,p11);
}
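
/* pack_pixel_1to2 packs two pixels into one machine word and pack_pixel_2to4
 * packs two such pairs into a pixel4, letting x264_predict_8x8_hd_c (above)
 * and x264_predict_8x8_hu_c (below) assemble each 4-pixel run once and store
 * it with a single MPIXEL_X4 write, since neighboring rows of these modes
 * overlap shifted by two pixels. */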
static void x264_predict_8x8_vl_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_TOPRIGHT
    SRC(0,0)= F1(t0,t1);
    SRC(0,1)= F2(t0,t1,t2);
    SRC(0,2)=SRC(1,0)= F1(t1,t2);
    SRC(0,3)=SRC(1,1)= F2(t1,t2,t3);
    SRC(0,4)=SRC(1,2)=SRC(2,0)= F1(t2,t3);
    SRC(0,5)=SRC(1,3)=SRC(2,1)= F2(t2,t3,t4);
    SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= F1(t3,t4);
    SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= F2(t3,t4,t5);
    SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= F1(t4,t5);
    SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= F2(t4,t5,t6);
    SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= F1(t5,t6);
    SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= F2(t5,t6,t7);
    SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= F1(t6,t7);
    SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= F2(t6,t7,t8);
    SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= F1(t7,t8);
    SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= F2(t7,t8,t9);
    SRC(5,6)=SRC(6,4)=SRC(7,2)= F1(t8,t9);
    SRC(5,7)=SRC(6,5)=SRC(7,3)= F2(t8,t9,t10);
    SRC(6,6)=SRC(7,4)= F1(t9,t10);
    SRC(6,7)=SRC(7,5)= F2(t9,t10,t11);
    SRC(7,6)= F1(t10,t11);
    SRC(7,7)= F2(t10,t11,t12);
}
static void x264_predict_8x8_hu_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    int p1 = pack_pixel_1to2(F1(l0,l1), F2(l0,l1,l2));
    int p2 = pack_pixel_1to2(F1(l1,l2), F2(l1,l2,l3));
    int p3 = pack_pixel_1to2(F1(l2,l3), F2(l2,l3,l4));
    int p4 = pack_pixel_1to2(F1(l3,l4), F2(l3,l4,l5));
    int p5 = pack_pixel_1to2(F1(l4,l5), F2(l4,l5,l6));
    int p6 = pack_pixel_1to2(F1(l5,l6), F2(l5,l6,l7));
    int p7 = pack_pixel_1to2(F1(l6,l7), F2(l6,l7,l7));
    int p8 = pack_pixel_1to2(l7,l7);
    SRC_X4(0,0)= pack_pixel_2to4(p1,p2);
    SRC_X4(0,1)= pack_pixel_2to4(p2,p3);
    SRC_X4(4,0)=SRC_X4(0,2)= pack_pixel_2to4(p3,p4);
    SRC_X4(4,1)=SRC_X4(0,3)= pack_pixel_2to4(p4,p5);
    SRC_X4(4,2)=SRC_X4(0,4)= pack_pixel_2to4(p5,p6);
    SRC_X4(4,3)=SRC_X4(0,5)= pack_pixel_2to4(p6,p7);
    SRC_X4(4,4)=SRC_X4(0,6)= pack_pixel_2to4(p7,p8);
    SRC_X4(4,5)=SRC_X4(4,6)=SRC_X4(0,7)=SRC_X4(4,7)= pack_pixel_2to4(p8,p8);
}

/****************************************************************************
 * Exported functions:
 ****************************************************************************/
void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_16x16_V ] = x264_predict_16x16_v_c;
    pf[I_PRED_16x16_H ] = x264_predict_16x16_h_c;
    pf[I_PRED_16x16_DC] = x264_predict_16x16_dc_c;
    pf[I_PRED_16x16_P ] = x264_predict_16x16_p_c;
    pf[I_PRED_16x16_DC_LEFT]= x264_predict_16x16_dc_left_c;
    pf[I_PRED_16x16_DC_TOP ]= x264_predict_16x16_dc_top_c;
    pf[I_PRED_16x16_DC_128 ]= x264_predict_16x16_dc_128_c;

#if HAVE_MMX
    x264_predict_16x16_init_mmx( cpu, pf );
#endif

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_predict_16x16_init_altivec( pf );
#endif

#if HAVE_ARMV6
    x264_predict_16x16_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_16x16_init_aarch64( cpu, pf );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_16x16_V ] = x264_intra_predict_vert_16x16_msa;
        pf[I_PRED_16x16_H ] = x264_intra_predict_hor_16x16_msa;
        pf[I_PRED_16x16_DC] = x264_intra_predict_dc_16x16_msa;
        pf[I_PRED_16x16_P ] = x264_intra_predict_plane_16x16_msa;
        pf[I_PRED_16x16_DC_LEFT]= x264_intra_predict_dc_left_16x16_msa;
        pf[I_PRED_16x16_DC_TOP ]= x264_intra_predict_dc_top_16x16_msa;
        pf[I_PRED_16x16_DC_128 ]= x264_intra_predict_dc_128_16x16_msa;
    }
#endif
#endif
}
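
/* Typical use (a sketch, not a prescribed API flow: the encoder keeps these
 * tables inside x264_t and calls through them per macroblock):
 *
 *     x264_predict_t predict_16x16[7];
 *     x264_predict_16x16_init( x264_cpu_detect(), predict_16x16 );
 *     predict_16x16[I_PRED_16x16_DC]( dst );
 *
 * where dst points at the block's top-left pixel inside the FDEC buffer and
 * its top/left neighbors have already been reconstructed. */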

void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V ] = x264_predict_8x8c_v_c;
    pf[I_PRED_CHROMA_H ] = x264_predict_8x8c_h_c;
    pf[I_PRED_CHROMA_DC] = x264_predict_8x8c_dc_c;
    pf[I_PRED_CHROMA_P ] = x264_predict_8x8c_p_c;
    pf[I_PRED_CHROMA_DC_LEFT]= x264_predict_8x8c_dc_left_c;
    pf[I_PRED_CHROMA_DC_TOP ]= x264_predict_8x8c_dc_top_c;
    pf[I_PRED_CHROMA_DC_128 ]= x264_predict_8x8c_dc_128_c;

#if HAVE_MMX
    x264_predict_8x8c_init_mmx( cpu, pf );
#endif

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_predict_8x8c_init_altivec( pf );
#endif

#if HAVE_ARMV6
    x264_predict_8x8c_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_8x8c_init_aarch64( cpu, pf );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_CHROMA_P ] = x264_intra_predict_plane_8x8_msa;
    }
#endif
#endif
}

void x264_predict_8x16c_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V ] = x264_predict_8x16c_v_c;
    pf[I_PRED_CHROMA_H ] = x264_predict_8x16c_h_c;
    pf[I_PRED_CHROMA_DC] = x264_predict_8x16c_dc_c;
    pf[I_PRED_CHROMA_P ] = x264_predict_8x16c_p_c;
    pf[I_PRED_CHROMA_DC_LEFT]= x264_predict_8x16c_dc_left_c;
    pf[I_PRED_CHROMA_DC_TOP ]= x264_predict_8x16c_dc_top_c;
    pf[I_PRED_CHROMA_DC_128 ]= x264_predict_8x16c_dc_128_c;

#if HAVE_MMX
    x264_predict_8x16c_init_mmx( cpu, pf );
#endif

#if HAVE_ARMV6
    x264_predict_8x16c_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_8x16c_init_aarch64( cpu, pf );
#endif
}

void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
    pf[I_PRED_8x8_V] = x264_predict_8x8_v_c;
    pf[I_PRED_8x8_H] = x264_predict_8x8_h_c;
    pf[I_PRED_8x8_DC] = x264_predict_8x8_dc_c;
    pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_c;
    pf[I_PRED_8x8_DDR] = x264_predict_8x8_ddr_c;
    pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_c;
    pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_c;
    pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_c;
    pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_c;
    pf[I_PRED_8x8_DC_LEFT]= x264_predict_8x8_dc_left_c;
    pf[I_PRED_8x8_DC_TOP] = x264_predict_8x8_dc_top_c;
    pf[I_PRED_8x8_DC_128] = x264_predict_8x8_dc_128_c;
    *predict_filter = x264_predict_8x8_filter_c;

#if HAVE_MMX
    x264_predict_8x8_init_mmx( cpu, pf, predict_filter );
#endif

#if HAVE_ARMV6
    x264_predict_8x8_init_arm( cpu, pf, predict_filter );
#endif

#if ARCH_AARCH64
    x264_predict_8x8_init_aarch64( cpu, pf, predict_filter );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_8x8_DDL] = x264_intra_predict_ddl_8x8_msa;
    }
#endif
#endif
}

void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
{
    pf[I_PRED_4x4_V] = x264_predict_4x4_v_c;
    pf[I_PRED_4x4_H] = x264_predict_4x4_h_c;
    pf[I_PRED_4x4_DC] = x264_predict_4x4_dc_c;
    pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_c;
    pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_c;
    pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_c;
    pf[I_PRED_4x4_HD] = x264_predict_4x4_hd_c;
    pf[I_PRED_4x4_VL] = x264_predict_4x4_vl_c;
    pf[I_PRED_4x4_HU] = x264_predict_4x4_hu_c;
    pf[I_PRED_4x4_DC_LEFT]= x264_predict_4x4_dc_left_c;
    pf[I_PRED_4x4_DC_TOP] = x264_predict_4x4_dc_top_c;
    pf[I_PRED_4x4_DC_128] = x264_predict_4x4_dc_128_c;

#if HAVE_MMX
    x264_predict_4x4_init_mmx( cpu, pf );
#endif

#if HAVE_ARMV6
    x264_predict_4x4_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_4x4_init_aarch64( cpu, pf );
#endif
}