/*****************************************************************************
 * mc-c.c: aarch64 motion compensation
 *****************************************************************************
 * Copyright (C) 2009-2016 x264 project
 *
 * Authors: David Conrad <[email protected]>
 *          Janne Grunau <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at [email protected].
 *****************************************************************************/

#include "common/common.h"
#include "mc.h"

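/* Prototypes for the motion-compensation routines implemented in aarch64
 * assembly; this file only provides the C-level wrappers and dispatch. */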
void x264_prefetch_ref_aarch64( uint8_t *, intptr_t, int );
void x264_prefetch_fenc_420_aarch64( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_prefetch_fenc_422_aarch64( uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void *x264_memcpy_aligned_neon( void *dst, const void *src, size_t n );
void x264_memzero_aligned_neon( void *dst, size_t n );

void x264_pixel_avg_16x16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_16x8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x16_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x8_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x4_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x16_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x8_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x4_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x2_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void x264_pixel_avg2_w4_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w20_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );

void x264_plane_copy_neon( pixel *dst, intptr_t i_dst,
                           pixel *src, intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_neon( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src, intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_rgb_neon( pixel *dsta, intptr_t i_dsta,
                                            pixel *dstb, intptr_t i_dstb,
                                            pixel *dstc, intptr_t i_dstc,
                                            pixel *src, intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_interleave_neon( pixel *dst, intptr_t i_dst,
                                      pixel *srcu, intptr_t i_srcu,
                                      pixel *srcv, intptr_t i_srcv, int w, int h );

void x264_store_interleave_chroma_neon( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fdec_neon( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_neon( pixel *dst, pixel *src, intptr_t i_src, int height );

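/* MC_WEIGHT declares the assembly weight functions for one denominator/offset
 * variant and builds a width-indexed (i_width>>2) dispatch table over them. */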
#define MC_WEIGHT(func)\
void x264_mc_weight_w20##func##_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
void x264_mc_weight_w16##func##_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
void x264_mc_weight_w8##func##_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
void x264_mc_weight_w4##func##_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
\
static void (* x264_mc##func##_wtab_neon[6])( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_weight_w4##func##_neon,\
    x264_mc_weight_w4##func##_neon,\
    x264_mc_weight_w8##func##_neon,\
    x264_mc_weight_w16##func##_neon,\
    x264_mc_weight_w16##func##_neon,\
    x264_mc_weight_w20##func##_neon,\
};

MC_WEIGHT()
MC_WEIGHT(_nodenom)
MC_WEIGHT(_offsetadd)
MC_WEIGHT(_offsetsub)

void x264_mc_copy_w4_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_mc_copy_w8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_mc_copy_w16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void x264_mc_chroma_neon( uint8_t *, uint8_t *, intptr_t, uint8_t *, intptr_t, int, int, int, int );
void integral_init4h_neon( uint16_t *, uint8_t *, intptr_t );
void integral_init4v_neon( uint16_t *, uint16_t *, intptr_t );
void integral_init8h_neon( uint16_t *, uint8_t *, intptr_t );
void integral_init8v_neon( uint16_t *, intptr_t );
void x264_frame_init_lowres_core_neon( uint8_t *, uint8_t *, uint8_t *, uint8_t *, uint8_t *, intptr_t, intptr_t, int, int );

void x264_mbtree_propagate_cost_neon( int16_t *, uint16_t *, uint16_t *, uint16_t *, uint16_t *, float *, int );

#if !HIGH_BIT_DEPTH
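/* Pick the weighting dispatch table that matches the current weights: pure
 * offset (add or subtract), no denominator, or the full scale+denom path. */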
static void x264_weight_cache_neon( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
        {
            w->weightfn = x264_mc_offsetsub_wtab_neon;
            w->cachea[0] = -w->i_offset;
        }
        else
        {
            w->weightfn = x264_mc_offsetadd_wtab_neon;
            w->cachea[0] = w->i_offset;
        }
    }
    else if( !w->i_denom )
        w->weightfn = x264_mc_nodenom_wtab_neon;
    else
        w->weightfn = x264_mc_wtab_neon;
}

static void (* const x264_pixel_avg_wtab_neon[6])( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int ) =
{
    NULL,
    x264_pixel_avg2_w4_neon,
    x264_pixel_avg2_w8_neon,
    x264_pixel_avg2_w16_neon,   // no slower than w12, so no point in a separate function
    x264_pixel_avg2_w16_neon,
    x264_pixel_avg2_w20_neon,
};

static void (* const x264_mc_copy_wtab_neon[5])( uint8_t *, intptr_t, uint8_t *, intptr_t, int ) =
{
    NULL,
    x264_mc_copy_w4_neon,
    x264_mc_copy_w8_neon,
    NULL,
    x264_mc_copy_w16_neon,
};

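/* Fetch a luma block for the given quarter-pel motion vector: start from the
 * nearest precomputed half-pel plane, average two planes when a quarter-pel
 * offset is needed, then apply explicit weighting if enabled. */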
static void mc_luma_neon( uint8_t *dst,    intptr_t i_dst_stride,
                          uint8_t *src[4], intptr_t i_src_stride,
                          int mvx, int mvy,
                          int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
    if( (mvy&3) == 3 )             // explicit if() to force conditional add
        src1 += i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        x264_pixel_avg_wtab_neon[i_width>>2](
            dst, i_dst_stride, src1, i_src_stride,
            src2, i_height );
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
        x264_mc_copy_wtab_neon[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );
}

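/* Same as mc_luma_neon, except that when no interpolation or weighting is
 * required it returns a pointer straight into the reference plane (and its
 * stride) instead of copying the block. */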
static uint8_t *get_ref_neon( uint8_t *dst,    intptr_t *i_dst_stride,
                              uint8_t *src[4], intptr_t i_src_stride,
                              int mvx, int mvy,
                              int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
    if( (mvy&3) == 3 )             // explicit if() to force conditional add
        src1 += i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        x264_pixel_avg_wtab_neon[i_width>>2](
            dst, *i_dst_stride, src1, i_src_stride,
            src2, i_height );
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

void x264_hpel_filter_neon( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                            uint8_t *src, intptr_t stride, int width,
                            int height, int16_t *buf );
#endif // !HIGH_BIT_DEPTH

PROPAGATE_LIST(neon)

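/* Hook the aarch64/NEON implementations into the motion-compensation function
 * table, gated on the detected CPU flags. */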
void x264_mc_init_aarch64( int cpu, x264_mc_functions_t *pf )
{
#if !HIGH_BIT_DEPTH
    if( cpu&X264_CPU_ARMV8 )
    {
        pf->prefetch_fenc_420 = x264_prefetch_fenc_420_aarch64;
        pf->prefetch_fenc_422 = x264_prefetch_fenc_422_aarch64;
        pf->prefetch_ref      = x264_prefetch_ref_aarch64;
    }

    if( !(cpu&X264_CPU_NEON) )
        return;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_neon;
    pf->copy[PIXEL_16x16]    = x264_mc_copy_w16_neon;
    pf->copy[PIXEL_8x8]      = x264_mc_copy_w8_neon;
    pf->copy[PIXEL_4x4]      = x264_mc_copy_w4_neon;

    pf->plane_copy                  = x264_plane_copy_neon;
    pf->plane_copy_deinterleave     = x264_plane_copy_deinterleave_neon;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_neon;
    pf->plane_copy_interleave       = x264_plane_copy_interleave_neon;

    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_neon;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_neon;
    pf->store_interleave_chroma       = x264_store_interleave_chroma_neon;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_neon;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_neon;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_neon;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_neon;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_neon;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_neon;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_neon;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_neon;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_neon;

    pf->weight       = x264_mc_wtab_neon;
    pf->offsetadd    = x264_mc_offsetadd_wtab_neon;
    pf->offsetsub    = x264_mc_offsetsub_wtab_neon;
    pf->weight_cache = x264_weight_cache_neon;

    pf->mc_chroma = x264_mc_chroma_neon;
    pf->mc_luma   = mc_luma_neon;
    pf->get_ref   = get_ref_neon;
    pf->hpel_filter = x264_hpel_filter_neon;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_neon;

    pf->integral_init4h = integral_init4h_neon;
    pf->integral_init8h = integral_init8h_neon;
    pf->integral_init4v = integral_init4v_neon;
    pf->integral_init8v = integral_init8v_neon;

    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_neon;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_neon;

    pf->memcpy_aligned  = x264_memcpy_aligned_neon;
    pf->memzero_aligned = x264_memzero_aligned_neon;
#endif // !HIGH_BIT_DEPTH
}