/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <[email protected]>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>

#include "config.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"
#include "libavutil/ppc/util_altivec.h"

#if HAVE_ALTIVEC
#define vzero vec_splat_s32(0)

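/*
 * GET_LS fetches the next 8 16-bit samples at element offset b from s.
 * On big-endian, vec_ld() only performs 16-byte-aligned loads, so the
 * unaligned access is emulated by blending the previously loaded vector
 * (a) with the following one through the vec_lvsl()-derived permute
 * vector c; on little-endian, vec_vsx_ld() loads unaligned data directly
 * and a just carries the look-ahead vector to the next expansion.
 */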
#if HAVE_BIGENDIAN
#define GET_LS(a,b,c,s) {\
    vector signed short l2 = vec_ld(((b) << 1) + 16, s);\
    ls = vec_perm(a, l2, c);\
    a = l2;\
}
#else
#define GET_LS(a,b,c,s) {\
    ls = a;\
    a = vec_vsx_ld(((b) << 1) + 16, s);\
}
#endif
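/*
 * Multiply 8 samples by one (splatted) filter coefficient and add the
 * 32-bit products to the accumulators d1/d2. vec_mule()/vec_mulo()
 * return the even/odd products, so vec_mergeh()/vec_mergel() are needed
 * to restore the original element order before accumulating.
 */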
#define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
    vector signed short ls;\
    GET_LS(l1, x, perm, src);\
    vector signed int i1 = vec_mule(filter, ls);\
    vector signed int i2 = vec_mulo(filter, ls);\
    vector signed int vf1, vf2;\
    vf1 = vec_mergeh(i1, i2);\
    vf2 = vec_mergel(i1, i2);\
    d1 = vec_add(d1, vf1);\
    d2 = vec_add(d2, vf2);\
} while (0)
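/*
 * LOAD_FILTER fetches a vector of filter coefficients at byte offset
 * joffset, LOAD_L1 a vector of source samples at byte offset xoffset;
 * both rely on those variables being in scope at the expansion site.
 * Big-endian again uses the aligned-load + vec_lvsl() permute idiom,
 * little-endian plain unaligned VSX loads.
 */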
#if HAVE_BIGENDIAN
#define LOAD_FILTER(vf,f) {\
    vector unsigned char perm0 = vec_lvsl(joffset, f);\
    vf = vec_ld(joffset, f);\
    vf = vec_perm(vf, vf, perm0);\
}
#define LOAD_L1(ll1,s,p){\
    p = vec_lvsl(xoffset, s);\
    ll1 = vec_ld(xoffset, s);\
}
#else
#define LOAD_FILTER(vf,f) {\
    vf = vec_vsx_ld(joffset, f);\
}
#define LOAD_L1(ll1,s,p){\
    ll1 = vec_vsx_ld(xoffset, s);\
}
#endif
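/*
 * Vertical scaling: compute 16 consecutive output pixels starting at
 * column x. Four 32-bit accumulators are seeded with the dither values
 * shifted left by 12, one filter tap is accumulated per source line,
 * and the totals are shifted right by 19 and packed down to 8 bits
 * with unsigned saturation.
 */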
static void yuv2planeX_16_altivec(const int16_t *filter, int filterSize,
                                  const int16_t **src, uint8_t *dest,
                                  const uint8_t *dither, int offset, int x)
{
    register int i, j;
    LOCAL_ALIGNED(16, int, val, [16]);
    vector signed int vo1, vo2, vo3, vo4;
    vector unsigned short vs1, vs2;
    vector unsigned char vf;
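    /* vec_splat_u32() only takes a 5-bit immediate (-16..15), so the
     * shift count of 19 has to be built as 10 + 9. */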
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));

    for (i = 0; i < 16; i++)
        val[i] = dither[(x + i + offset) & 7] << 12;

    vo1 = vec_ld(0, val);
    vo2 = vec_ld(16, val);
    vo3 = vec_ld(32, val);
    vo4 = vec_ld(48, val);

    for (j = 0; j < filterSize; j++) {
        unsigned int joffset = j << 1;
        unsigned int xoffset = x << 1;
        vector unsigned char perm;
        vector signed short l1, vLumFilter;
        LOAD_FILTER(vLumFilter, filter);
        vLumFilter = vec_splat(vLumFilter, 0);
        LOAD_L1(l1, src[j], perm);
        yuv2planeX_8(vo1, vo2, l1, src[j], x, perm, vLumFilter);
        yuv2planeX_8(vo3, vo4, l1, src[j], x + 8, perm, vLumFilter);
    }

    vo1 = vec_sra(vo1, altivec_vectorShiftInt19);
    vo2 = vec_sra(vo2, altivec_vectorShiftInt19);
    vo3 = vec_sra(vo3, altivec_vectorShiftInt19);
    vo4 = vec_sra(vo4, altivec_vectorShiftInt19);
    vs1 = vec_packsu(vo1, vo2);
    vs2 = vec_packsu(vo3, vo4);
    vf = vec_packsu(vs1, vs2);
    VEC_ST(vf, 0, dest);
}
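/*
 * Scalar version, used for the unaligned head of the row (until dest
 * reaches 16-byte alignment) and for the tail of fewer than 16 pixels;
 * it matches the generic C code that the AltiVec path is based on.
 */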
static inline void yuv2planeX_u(const int16_t *filter, int filterSize,
                                const int16_t **src, uint8_t *dest, int dstW,
                                const uint8_t *dither, int offset, int x)
{
    int i, j;

    for (i = x; i < dstW; i++) {
        int t = dither[(i + offset) & 7] << 12;
        for (j = 0; j < filterSize; j++)
            t += src[j][i] * filter[j];
        dest[i] = av_clip_uint8(t >> 19);
    }
}
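/*
 * Dispatcher: dst_u is the number of leading pixels until dest becomes
 * 16-byte aligned (-(uintptr_t)dest & 15). The head and the remainder
 * after the last full block go through the scalar yuv2planeX_u(); the
 * aligned middle is processed 16 pixels at a time.
 */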
static void yuv2planeX_altivec(const int16_t *filter, int filterSize,
                               const int16_t **src, uint8_t *dest, int dstW,
                               const uint8_t *dither, int offset)
{
    int dst_u = -(uintptr_t)dest & 15;
    int i;

    yuv2planeX_u(filter, filterSize, src, dest, dst_u, dither, offset, 0);

    for (i = dst_u; i < dstW - 15; i += 16)
        yuv2planeX_16_altivec(filter, filterSize, src, dest + i, dither,
                              offset, i);

    yuv2planeX_u(filter, filterSize, src, dest, dstW, dither, offset, i);
}

#if HAVE_BIGENDIAN
// The shift by 3 in GET_VF4 below is 2 (filterSize == 4) + 1 (sizeof(short) == 2).

// The neat trick: We only care for half the elements,
// high or low depending on (i<<3)%16 (it's 0 or 8 here),
// and we're going to use vec_mule, so we choose
// carefully how to "unpack" the elements into the even slots.
#define GET_VF4(a, vf, f) {\
    vf = vec_ld(a << 3, f);\
    if ((a << 3) % 16)\
        vf = vec_mergel(vf, (vector signed short)vzero);\
    else\
        vf = vec_mergeh(vf, (vector signed short)vzero);\
}
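// Unaligned-access helpers for the big-endian path: FIRST_LOAD performs
// the initial aligned load and builds the vec_lvsl() permute vector,
// LOAD_SRCV/LOAD_SRCV8 fetch the next vector(s) and extract a full/half
// unaligned vector, UPDATE_PTR recycles the last loaded vector for the
// next iteration, and GET_VFD does the same for filter coefficients.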
#define FIRST_LOAD(sv, pos, s, per) {\
    sv = vec_ld(pos, s);\
    per = vec_lvsl(pos, s);\
}
#define UPDATE_PTR(s0, d0, s1, d1) {\
    d0 = s0;\
    d1 = s1;\
}
#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
    v1 = vec_ld(pos + a + 16, s);\
    vf = vec_perm(v0, v1, per);\
}
#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) {\
    if ((((uintptr_t)s + pos) % 16) > 8) {\
        v1 = vec_ld(pos + a + 16, s);\
    }\
    vf = vec_perm(v0, v1, per);\
}
#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
    vf1 = vec_ld((a * 2 * filterSize) + (b * 2) + 16 + off, f);\
    vf = vec_perm(vf0, vf1, per);\
}
#else /* else of #if HAVE_BIGENDIAN */
#define GET_VF4(a, vf, f) {\
    vf = (vector signed short)vec_vsx_ld(a << 3, f);\
    vf = vec_mergeh(vf, (vector signed short)vzero);\
}
#define FIRST_LOAD(sv, pos, s, per) {}
#define UPDATE_PTR(s0, d0, s1, d1) {}
#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
    vf = vec_vsx_ld(pos + a, s);\
}
#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
    vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
}
#endif /* end of #if HAVE_BIGENDIAN */
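/*
 * Horizontal scaling: for each output pixel i, take the dot product of
 * filterSize 8-bit source samples starting at filterPos[i] with the
 * 16-bit coefficients at filter[i * filterSize], shift right by 7 and
 * cap at (1 << 15) - 1. vec_msums() multiplies and sums adjacent pairs,
 * vec_sums() reduces across the vector and leaves the total in element
 * 3, which is why tempo[3] is read back below.
 */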
static void hScale_altivec_real(SwsContext *c, int16_t *dst, int dstW,
                                const uint8_t *src, const int16_t *filter,
                                const int32_t *filterPos, int filterSize)
{
    register int i;
    LOCAL_ALIGNED(16, int, tempo, [4]);

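    /* Filter widths that are not a multiple of 4 never fill a vector
     * evenly; use the plain scalar loop for those. */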
    if (filterSize % 4) {
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];
            register int val = 0;
            for (j = 0; j < filterSize; j++)
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
        }
    } else
        switch (filterSize) {
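        /* 4 taps: both the coefficients (via GET_VF4) and the samples
         * are moved into the even 16-bit slots, so one vec_mule() yields
         * the four 32-bit products to sum. */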
        case 4:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_vF = unaligned_load(srcPos, src);
                vector signed short src_v, filter_v;
                vector signed int val_vEven, val_s;
                src_v = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                // now put our elements in the even slots
                src_v = vec_mergeh(src_v, (vector signed short)vzero);
                GET_VF4(i, filter_v, filter);
                val_vEven = vec_mule(src_v, filter_v);
                val_s = vec_sums(val_vEven, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
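        /* 8 taps: a single load covers the 8 source bytes and one
         * vec_msums() does the multiply and pairwise add in one step. */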
        case 8:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];
                vector unsigned char src_vF, src_v0, src_v1;
                vector unsigned char permS;
                vector signed short src_v, filter_v;
                vector signed int val_v, val_s;
                FIRST_LOAD(src_v0, srcPos, src, permS);
                LOAD_SRCV8(srcPos, 0, src, permS, src_v0, src_v1, src_vF);
                src_v = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                filter_v = vec_ld(i << 4, filter);
                val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
                val_s = vec_sums(val_v, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;

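        /* 16 taps: unpack the 16 source bytes into two halves and chain
         * two vec_msums(), feeding the first result into the second as
         * its accumulator. */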
        case 16:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_vF = unaligned_load(srcPos, src);
                vector signed short src_vA = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                vector signed short src_vB = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEL((vector unsigned char)vzero, src_vF));
                vector signed short filter_v0 = vec_ld(i << 5, filter);
                vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);

                vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
                vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);

                vector signed int val_s = vec_sums(val_v, vzero);

                VEC_ST(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;

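        /* Remaining multiple-of-4 sizes: consume 16 taps per iteration,
         * then one trailing group of 8 in the conditional below, if any
         * is left. */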
        default:
            for (i = 0; i < dstW; i++) {
                register int j, offset = i * 2 * filterSize;
                register int srcPos = filterPos[i];

                vector signed int val_s, val_v = (vector signed int)vzero;
                vector signed short filter_v0R;
                vector unsigned char permF, src_v0, permS;
                FIRST_LOAD(filter_v0R, offset, filter, permF);
                FIRST_LOAD(src_v0, srcPos, src, permS);

                for (j = 0; j < filterSize - 15; j += 16) {
                    vector unsigned char src_v1, src_vF;
                    vector signed short filter_v1R, filter_v2R, filter_v0, filter_v1;
                    LOAD_SRCV(srcPos, j, src, permS, src_v0, src_v1, src_vF);
                    vector signed short src_vA = // vec_unpackh sign-extends...
                            (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                    vector signed short src_vB = // vec_unpackh sign-extends...
                            (vector signed short)(VEC_MERGEL((vector unsigned char)vzero, src_vF));
                    GET_VFD(i, j, filter, filter_v0R, filter_v1R, permF, filter_v0, 0);
                    GET_VFD(i, j, filter, filter_v1R, filter_v2R, permF, filter_v1, 16);

                    vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
                    val_v = vec_msums(src_vB, filter_v1, val_acc);
                    UPDATE_PTR(filter_v2R, filter_v0R, src_v1, src_v0);
                }

                if (j < filterSize - 7) {
                    // loading src_v0 is useless, it's already done above
                    vector unsigned char src_v1, src_vF;
                    vector signed short src_v, filter_v1R, filter_v;
                    LOAD_SRCV8(srcPos, j, src, permS, src_v0, src_v1, src_vF);
                    src_v = // vec_unpackh sign-extends...
                            (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                    GET_VFD(i, j, filter, filter_v0R, filter_v1R, permF, filter_v, 0);
                    val_v = vec_msums(src_v, filter_v, val_v);
                }
                val_s = vec_sums(val_v, vzero);

                VEC_ST(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
}
#endif /* HAVE_ALTIVEC */

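/*
 * Runtime initialization: the AltiVec paths are only installed when the
 * CPU flag is set. The horizontal scaler requires 8-bit input and at
 * most 14-bit intermediates, the planar vertical scaler skips 9/10- and
 * 16-bit and NV12/NV21 outputs, and the packed-RGB writers are disabled
 * in bit-exact and full-chroma-interpolation modes as well as whenever
 * an alpha plane is in use.
 */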
av_cold void ff_sws_init_swscale_ppc(SwsContext *c)
{
#if HAVE_ALTIVEC
    enum AVPixelFormat dstFormat = c->dstFormat;

    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    if (c->srcBpc == 8 && c->dstBpc <= 14) {
        c->hyScale = c->hcScale = hScale_altivec_real;
    }
    if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) &&
        dstFormat != AV_PIX_FMT_NV12 && dstFormat != AV_PIX_FMT_NV21 &&
        !c->alpPixBuf) {
        c->yuv2planeX = yuv2planeX_altivec;
    }

    /* The following list of supported dstFormat values should
     * match what's found in the body of ff_yuv2packedX_altivec() */
    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf) {
        switch (c->dstFormat) {
        case AV_PIX_FMT_ABGR:
            c->yuv2packedX = ff_yuv2abgr_X_altivec;
            break;
        case AV_PIX_FMT_BGRA:
            c->yuv2packedX = ff_yuv2bgra_X_altivec;
            break;
        case AV_PIX_FMT_ARGB:
            c->yuv2packedX = ff_yuv2argb_X_altivec;
            break;
        case AV_PIX_FMT_RGBA:
            c->yuv2packedX = ff_yuv2rgba_X_altivec;
            break;
        case AV_PIX_FMT_BGR24:
            c->yuv2packedX = ff_yuv2bgr24_X_altivec;
            break;
        case AV_PIX_FMT_RGB24:
            c->yuv2packedX = ff_yuv2rgb24_X_altivec;
            break;
        }
    }
#endif /* HAVE_ALTIVEC */
}