/*****************************************************************************
 * mc.h: motion compensation
 *****************************************************************************
 * Copyright (C) 2004-2016 x264 project
 *
 * Authors: Loren Merritt <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at [email protected].
 *****************************************************************************/

#ifndef X264_MC_H
#define X264_MC_H

#define MC_CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
#define MC_CLIP_ADD2(s,x)\
do\
{\
    MC_CLIP_ADD((s)[0], (x)[0]);\
    MC_CLIP_ADD((s)[1], (x)[1]);\
} while(0)
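
/* A minimal sketch (not part of the upstream header) of what MC_CLIP_ADD is
 * for: propagation costs are accumulated into 16-bit slots, and the clip
 * saturates at (1<<15)-1 so repeated additions cannot wrap around.  Assumes
 * X264_MIN and the <stdint.h> types from x264's common headers. */
#if 0
static void mc_clip_add_example( void )
{
    uint16_t cost = 32000;
    MC_CLIP_ADD( cost, 1000 ); /* 32000+1000 > 32767, so cost saturates at 32767 */
    MC_CLIP_ADD( cost, 5 );    /* already saturated, stays at 32767 */
}
#endif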

#define PROPAGATE_LIST(cpu)\
void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
                                                uint16_t *lowres_costs, int16_t *output,\
                                                int bipred_weight, int mb_y, int len );\
\
static void x264_mbtree_propagate_list_##cpu( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],\
                                              int16_t *propagate_amount, uint16_t *lowres_costs,\
                                              int bipred_weight, int mb_y, int len, int list )\
{\
    int16_t *current = h->scratch_buffer2;\
\
    x264_mbtree_propagate_list_internal_##cpu( mvs, propagate_amount, lowres_costs,\
                                               current, bipred_weight, mb_y, len );\
\
    unsigned stride = h->mb.i_mb_stride;\
    unsigned width = h->mb.i_mb_width;\
    unsigned height = h->mb.i_mb_height;\
\
    for( unsigned i = 0; i < len; current += 32 )\
    {\
        int end = X264_MIN( i+8, len );\
        for( ; i < end; i++, current += 2 )\
        {\
            if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )\
                continue;\
\
            unsigned mbx = current[0];\
            unsigned mby = current[1];\
            unsigned idx0 = mbx + mby * stride;\
            unsigned idx2 = idx0 + stride;\
\
            /* Shortcut for the simple/common case of zero MV */\
            if( !M32( mvs[i] ) )\
            {\
                MC_CLIP_ADD( ref_costs[idx0], current[16] );\
                continue;\
            }\
\
            if( mbx < width-1 && mby < height-1 )\
            {\
                MC_CLIP_ADD2( ref_costs+idx0, current+16 );\
                MC_CLIP_ADD2( ref_costs+idx2, current+32 );\
            }\
            else\
            {\
                /* Note: this takes advantage of unsigned representation to\
                 * catch negative mbx/mby. */\
                if( mby < height )\
                {\
                    if( mbx < width )\
                        MC_CLIP_ADD( ref_costs[idx0+0], current[16] );\
                    if( mbx+1 < width )\
                        MC_CLIP_ADD( ref_costs[idx0+1], current[17] );\
                }\
                if( mby+1 < height )\
                {\
                    if( mbx < width )\
                        MC_CLIP_ADD( ref_costs[idx2+0], current[32] );\
                    if( mbx+1 < width )\
                        MC_CLIP_ADD( ref_costs[idx2+1], current[33] );\
                }\
            }\
        }\
    }\
}
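
/* Illustrative only: PROPAGATE_LIST(cpu) pairs the declaration of an asm
 * kernel with a C wrapper that scatters the kernel's per-MB results into
 * ref_costs.  A per-arch backend would instantiate and register it roughly
 * like this; the real instantiation sites live in the arch-specific mc files,
 * not in this header, and the SSSE3 name below is only an example. */
#if 0
PROPAGATE_LIST(ssse3)   /* declares ..._internal_ssse3, defines x264_mbtree_propagate_list_ssse3 */
/* ...and later, in the per-arch CPU dispatch code:                          */
/* pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;             */
#endif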

struct x264_weight_t;
typedef void (* weight_fn_t)( pixel *, intptr_t, pixel *,intptr_t, const struct x264_weight_t *, int );
typedef struct x264_weight_t
{
    /* aligning the first member is a gcc hack to force the struct to be
     * 16 byte aligned, as well as force sizeof(struct) to be a multiple of 16 */
    ALIGNED_16( int16_t cachea[8] );
    int16_t cacheb[8];
    int32_t i_denom;
    int32_t i_scale;
    int32_t i_offset;
    weight_fn_t *weightfn;
} ALIGNED_16( x264_weight_t );
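
/* Sketch of what the two ALIGNED_16 uses buy (illustrative, assumes a C11
 * compiler for _Static_assert/_Alignof): the first-member alignment plus the
 * struct attribute keep the SIMD weight caches 16-byte aligned and pad
 * sizeof() to a multiple of 16, so arrays of x264_weight_t stay aligned too. */
#if 0
_Static_assert( sizeof(x264_weight_t) % 16 == 0, "x264_weight_t padded to a multiple of 16" );
_Static_assert( _Alignof(x264_weight_t) == 16,   "x264_weight_t is 16-byte aligned" );
#endif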

extern const x264_weight_t x264_weight_none[3];
extern const uint8_t x264_hpel_ref0[16];
extern const uint8_t x264_hpel_ref1[16];

#define SET_WEIGHT( w, b, s, d, o )\
{\
    (w).i_scale = (s);\
    (w).i_denom = (d);\
    (w).i_offset = (o);\
    if( b )\
        h->mc.weight_cache( h, &w );\
    else\
        w.weightfn = NULL;\
}
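
/* Illustrative sketch of how SET_WEIGHT is meant to be used.  The scale/
 * denom/offset values below are placeholders, and `h` must be the enclosing
 * x264_t *, since the macro expands to h->mc.weight_cache( h, &w ). */
#if 0
x264_weight_t w;
SET_WEIGHT( w, 0, 1, 0, 0 );                    /* disable: weightfn ends up NULL          */
SET_WEIGHT( w, 1, i_scale, i_denom, i_offset ); /* enable: weight_cache() fills the caches */
#endif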

/* Do the MC
 * XXX: Only width = 4, 8 or 16 are valid
 * width == 4 -> height == 4 or 8
 * width == 8 -> height == 4 or 8 or 16
 * width == 16-> height == 8 or 16
 * */

typedef struct
{
    void (*mc_luma)( pixel *dst, intptr_t i_dst, pixel **src, intptr_t i_src,
                     int mvx, int mvy, int i_width, int i_height, const x264_weight_t *weight );

    /* may round up the dimensions if they're not a power of 2 */
    pixel* (*get_ref)( pixel *dst, intptr_t *i_dst, pixel **src, intptr_t i_src,
                       int mvx, int mvy, int i_width, int i_height, const x264_weight_t *weight );

    /* mc_chroma may write up to 2 bytes of garbage to the right of dst,
     * so it must be run from left to right. */
    void (*mc_chroma)( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,
                       int mvx, int mvy, int i_width, int i_height );

    void (*avg[12])( pixel *dst, intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
                     pixel *src2, intptr_t src2_stride, int i_weight );

    /* only 16x16, 8x8, and 4x4 defined */
    void (*copy[7])( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int i_height );
    void (*copy_16x16_unaligned)( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int i_height );

    void (*store_interleave_chroma)( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
    void (*load_deinterleave_chroma_fenc)( pixel *dst, pixel *src, intptr_t i_src, int height );
    void (*load_deinterleave_chroma_fdec)( pixel *dst, pixel *src, intptr_t i_src, int height );

    void (*plane_copy)( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h );
    void (*plane_copy_swap)( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h );
    void (*plane_copy_interleave)( pixel *dst, intptr_t i_dst, pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h );
    /* may write up to 15 pixels off the end of each plane */
    void (*plane_copy_deinterleave)( pixel *dstu, intptr_t i_dstu, pixel *dstv, intptr_t i_dstv,
                                     pixel *src, intptr_t i_src, int w, int h );
    void (*plane_copy_deinterleave_rgb)( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
                                         pixel *dstc, intptr_t i_dstc, pixel *src, intptr_t i_src, int pw, int w, int h );
    void (*plane_copy_deinterleave_v210)( pixel *dsty, intptr_t i_dsty,
                                          pixel *dstc, intptr_t i_dstc,
                                          uint32_t *src, intptr_t i_src, int w, int h );
    void (*hpel_filter)( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
                         intptr_t i_stride, int i_width, int i_height, int16_t *buf );

    /* prefetch the next few macroblocks of fenc or fdec */
    void (*prefetch_fenc)    ( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
    void (*prefetch_fenc_420)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
    void (*prefetch_fenc_422)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
    /* prefetch the next few macroblocks of a hpel reference frame */
    void (*prefetch_ref)( pixel *pix, intptr_t stride, int parity );

    void *(*memcpy_aligned)( void *dst, const void *src, size_t n );
    void (*memzero_aligned)( void *dst, size_t n );

    /* successive elimination prefilter */
    void (*integral_init4h)( uint16_t *sum, pixel *pix, intptr_t stride );
    void (*integral_init8h)( uint16_t *sum, pixel *pix, intptr_t stride );
    void (*integral_init4v)( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
    void (*integral_init8v)( uint16_t *sum8, intptr_t stride );

    void (*frame_init_lowres_core)( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
                                    intptr_t src_stride, intptr_t dst_stride, int width, int height );
    weight_fn_t *weight;
    weight_fn_t *offsetadd;
    weight_fn_t *offsetsub;
    void (*weight_cache)( x264_t *, x264_weight_t * );

    void (*mbtree_propagate_cost)( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                   uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

    void (*mbtree_propagate_list)( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
                                   int16_t *propagate_amount, uint16_t *lowres_costs,
                                   int bipred_weight, int mb_y, int len, int list );
} x264_mc_functions_t;

void x264_mc_init( int cpu, x264_mc_functions_t *pf, int cpu_independent );
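
/* A rough usage sketch (the variable names here are placeholders, not
 * upstream code): the encoder fills the dispatch table once for the detected
 * CPU and then calls motion compensation through it.  Block sizes must
 * respect the width/height combinations documented above. */
#if 0
x264_mc_functions_t mc;
x264_mc_init( cpu_flags, &mc, 0 );

/* 16x16 luma MC from a set of half-pel planes, no weighted prediction: */
mc.mc_luma( dst, dst_stride, hpel_planes, ref_stride,
            mvx, mvy, 16, 16, x264_weight_none );
#endif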

#endif