;; NOTE(review): removed non-source web-viewer chrome ("Book a Demo!",
;; logo/nav text, view counter) that a scraping tool prepended to this file.
;; This file is FFmpeg's x86 resampling assembly (libswresample).
;******************************************************************************
;* Copyright (c) 2012 Michael Niedermayer
;* Copyright (c) 2014 James Almer <jamrial@gmail.com>
;* Copyright (c) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

; 'pointer' reserves one pointer-sized slot in the struc below:
; resq (8 bytes) on 64-bit builds, resd (4 bytes) on 32-bit builds.
%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif
; Mirror of the leading fields of the C-side ResampleContext, so the asm can
; address them by name instead of hard-coded offsets. Field order and widths
; must stay in sync with the C struct.
struc ResampleContext
    .av_class:              pointer 1
    .filter_bank:           pointer 1
    .filter_length:         resd 1
    .filter_alloc:          resd 1
    .ideal_dst_incr:        resd 1
    .dst_incr:              resd 1
    .dst_incr_div:          resd 1
    .dst_incr_mod:          resd 1
    .index:                 resd 1
    .frac:                  resd 1
    .src_incr:              resd 1
    .compensation_distance: resd 1
    .phase_shift:           resd 1
    .phase_mask:            resd 1

    ; there's a few more here but we only care about the first few
endstruc
SECTION_RODATA

pf_1:      dd 1.0          ; 1.0f constant (float kernels)
pdbl_1:    dq 1.0          ; 1.0  constant (double kernels)
pd_0x4000: dd 0x4000       ; accumulator seed before the >>15 in the int16 path

SECTION .text
; RESAMPLE_FNS: emits resample_common_<fmt> and resample_linear_<fmt> for the
; SIMD flavour selected by the preceding INIT_* directive.
%macro RESAMPLE_FNS 3-5 ; format [float or int16], bps, log2_bps, float op suffix [s or d], 1.0 constant
; int resample_common_$format(ResampleContext *ctx, $format *dst,
;                             const $format *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
cglobal resample_common_%1, 0, 15, 2, ctx, dst, src, phase_shift, index, frac, \
                                      dst_incr_mod, size, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      phase_mask, dst_end, filter_bank

    ; use red-zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x14]
%endif

    ; load as many variables in registers as possible; for the rest, store
    ; on stack so that we have 'ctx' available as one extra register
    mov                       sized, r3d
    mov                 phase_maskd, [ctxq+ResampleContext.phase_mask]
%if UNIX64
    mov       update_context_stackd, r4d
%endif
    mov                      indexd, [ctxq+ResampleContext.index]
    mov                       fracd, [ctxq+ResampleContext.frac]
    mov               dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov                filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov                   src_incrd, [ctxq+ResampleContext.src_incr]
    mov                  ctx_stackq, ctxq
    mov          min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
    mov               dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl          min_filter_len_x4d, %3
    lea                    dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov                         ecx, [ctxq+ResampleContext.phase_shift]
    mov                         edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    mov                         R9d, [ctxq+ResampleContext.filter_alloc]
    mov                         ecx, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%endif

    neg          min_filter_len_x4q
    sub                filter_bankq, min_filter_len_x4q
    sub                        srcq, min_filter_len_x4q
    mov                  src_stackq, srcq
%else ; x86-32
cglobal resample_common_%1, 1, 7, 2, ctx, phase_shift, dst, frac, \
                                     index, min_filter_length_x4, filter_bank

    ; push temp variables to stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov                        dstq, r1mp
    mov                          r3, r3mp
    lea                          r3, [dstq+r3*%2]
    PUSH                        dword [ctxq+ResampleContext.dst_incr_div]
    PUSH                        dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH                        dword [ctxq+ResampleContext.filter_alloc]
    PUSH                        r3
    PUSH                        dword [ctxq+ResampleContext.phase_mask]
    PUSH                        dword [ctxq+ResampleContext.src_incr]
    mov       min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov                      indexd, [ctxq+ResampleContext.index]
    shl       min_filter_length_x4d, %3
    mov                       fracd, [ctxq+ResampleContext.frac]
    neg       min_filter_length_x4q
    mov                filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub                        r2mp, min_filter_length_x4q
    sub                filter_bankq, min_filter_length_x4q
    PUSH                        min_filter_length_x4q
    PUSH                        filter_bankq
    mov                phase_shiftd, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS src, phase_shift, dst, frac, index, min_filter_count_x4, filter

%define filter_bankq          dword [rsp+0x0]
%define min_filter_length_x4q dword [rsp+0x4]
%define src_incrd             dword [rsp+0x8]
%define phase_maskd           dword [rsp+0xc]
%define dst_endq              dword [rsp+0x10]
%define filter_allocd         dword [rsp+0x14]
%define dst_incr_modd         dword [rsp+0x18]
%define dst_incr_divd         dword [rsp+0x1c]

    mov                        srcq, r2mp
%endif

.loop:
    mov                     filterd, filter_allocd
    imul                    filterd, indexd
%if ARCH_X86_64
    mov        min_filter_count_x4q, min_filter_len_x4q
    lea                     filterq, [filter_bankq+filterq*%2]
%else ; x86-32
    mov        min_filter_count_x4q, filter_bankq
    lea                     filterq, [min_filter_count_x4q+filterq*%2]
    mov        min_filter_count_x4q, min_filter_length_x4q
%endif
%ifidn %1, int16
    movd                         m0, [pd_0x4000]
%else ; float/double
    xorps                        m0, m0, m0
%endif

    align 16
.inner_loop:
    movu                         m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
%if cpuflag(xop)
    vpmadcswd                    m0, m1, [filterq+min_filter_count_x4q*1], m0
%else
    pmaddwd                      m1, [filterq+min_filter_count_x4q*1]
    paddd                        m0, m1
%endif
%else ; float/double
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4                     m0, m1, [filterq+min_filter_count_x4q*1], m0
%else
    mulp%4                       m1, m1, [filterq+min_filter_count_x4q*1]
    addp%4                       m0, m0, m1
%endif ; cpuflag
%endif
    add        min_filter_count_x4q, mmsize
    js .inner_loop

%ifidn %1, int16
    HADDD                        m0, m1
    psrad                        m0, 15
    add                       fracd, dst_incr_modd
    packssdw                     m0, m0
    add                      indexd, dst_incr_divd
    movd                     [dstq], m0
%else ; float/double
    ; horizontal sum & store
%if mmsize == 32
    vextractf128                xm1, m0, 0x1
    addps                       xm0, xm1
%endif
    movhlps                     xm1, xm0
%ifidn %1, float
    addps                       xm0, xm1
    shufps                      xm1, xm0, xm0, q0001
%endif
    add                       fracd, dst_incr_modd
    addp%4                      xm0, xm1
    add                      indexd, dst_incr_divd
    movs%4                   [dstq], xm0
%endif
    cmp                       fracd, src_incrd
    jl .skip
    sub                       fracd, src_incrd
    inc                      indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS src, phase_shift, dst, frac, index, index_incr
%endif

.skip:
    mov                 index_incrd, indexd
    add                        dstq, %2
    and                      indexd, phase_maskd
    sar                 index_incrd, phase_shiftb
    lea                        srcq, [srcq+index_incrq*%2]
    cmp                        dstq, dst_endq
    jne .loop

%if ARCH_X86_64
    DEFINE_ARGS ctx, dst, src, phase_shift, index, frac
%else ; x86-32
    DEFINE_ARGS src, ctx, update_context, frac, index
%endif

    cmp dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the consumed
    ; number of bytes; however, we only use the value if update_context
    ; is true, so let's just leave it uninitialized otherwise
    mov                        ctxq, ctx_stackq
    movifnidn                   rax, srcq
    mov [ctxq+ResampleContext.frac], fracd
    sub                         rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr                         rax, %3

.skip_store:
%if ARCH_X86_32
    ADD                         rsp, 0x20
%endif
    RET

; int resample_linear_$format(ResampleContext *ctx, float *dst,
;                             const float *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
%if UNIX64
cglobal resample_linear_%1, 0, 15, 5, ctx, dst, phase_mask, phase_shift, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      src, dst_end, filter_bank

    mov                        srcq, r2mp
%else ; win64
cglobal resample_linear_%1, 0, 15, 5, ctx, phase_mask, src, phase_shift, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      dst, dst_end, filter_bank

    mov                        dstq, r1mp
%endif

    ; use red-zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%define phase_mask_stackd     [rsp-0x14]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x18]
%endif

    ; load as many variables in registers as possible; for the rest, store
    ; on stack so that we have 'ctx' available as one extra register
    mov                       sized, r3d
    mov                 phase_maskd, [ctxq+ResampleContext.phase_mask]
%if UNIX64
    mov       update_context_stackd, r4d
%endif
    mov                      indexd, [ctxq+ResampleContext.index]
    mov                       fracd, [ctxq+ResampleContext.frac]
    mov               dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov                filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov                   src_incrd, [ctxq+ResampleContext.src_incr]
    mov                  ctx_stackq, ctxq
    mov           phase_mask_stackd, phase_maskd
    mov          min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
%ifidn %1, int16
    movd                         m4, [pd_0x4000]
%else ; float/double
    cvtsi2s%4                   xm0, src_incrd
    movs%4                      xm4, [%5]
    divs%4                      xm4, xm0
%endif
    mov               dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl          min_filter_len_x4d, %3
    lea                    dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov                         ecx, [ctxq+ResampleContext.phase_shift]
    mov                         edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    mov                         R9d, [ctxq+ResampleContext.filter_alloc]
    mov                         ecx, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%endif

    neg          min_filter_len_x4q
    sub                filter_bankq, min_filter_len_x4q
    sub                        srcq, min_filter_len_x4q
    mov                  src_stackq, srcq
%else ; x86-32
cglobal resample_linear_%1, 1, 7, 5, ctx, min_filter_length_x4, filter2, \
                                     frac, index, dst, filter_bank

    ; push temp variables to stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov                        dstq, r1mp
    mov                          r3, r3mp
    lea                          r3, [dstq+r3*%2]
    PUSH                        dword [ctxq+ResampleContext.dst_incr_div]
    PUSH                        r3
    mov                          r3, dword [ctxq+ResampleContext.filter_alloc]
    PUSH                        dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH                        r3
    shl                          r3, %3
    PUSH                        r3
    mov                          r3, dword [ctxq+ResampleContext.src_incr]
    PUSH                        dword [ctxq+ResampleContext.phase_mask]
    PUSH                        r3d
%ifidn %1, int16
    movd                         m4, [pd_0x4000]
%else ; float/double
    cvtsi2s%4                   xm0, r3d
    movs%4                      xm4, [%5]
    divs%4                      xm4, xm0
%endif
    mov       min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov                      indexd, [ctxq+ResampleContext.index]
    shl       min_filter_length_x4d, %3
    mov                       fracd, [ctxq+ResampleContext.frac]
    neg       min_filter_length_x4q
    mov                filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub                        r2mp, min_filter_length_x4q
    sub                filter_bankq, min_filter_length_x4q
    PUSH                        min_filter_length_x4q
    PUSH                        filter_bankq
    PUSH                        dword [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS filter1, min_filter_count_x4, filter2, frac, index, dst, src

%define phase_shift_stackd    dword [rsp+0x0]
%define filter_bankq          dword [rsp+0x4]
%define min_filter_length_x4q dword [rsp+0x8]
%define src_incrd             dword [rsp+0xc]
%define phase_mask_stackd     dword [rsp+0x10]
%define filter_alloc_x4q      dword [rsp+0x14]
%define filter_allocd         dword [rsp+0x18]
%define dst_incr_modd         dword [rsp+0x1c]
%define dst_endq              dword [rsp+0x20]
%define dst_incr_divd         dword [rsp+0x24]

    mov                        srcq, r2mp
%endif

.loop:
    mov                    filter1d, filter_allocd
    imul                   filter1d, indexd
%if ARCH_X86_64
    mov        min_filter_count_x4q, min_filter_len_x4q
    lea                    filter1q, [filter_bankq+filter1q*%2]
    lea                    filter2q, [filter1q+filter_allocq*%2]
%else ; x86-32
    mov        min_filter_count_x4q, filter_bankq
    lea                    filter1q, [min_filter_count_x4q+filter1q*%2]
    mov        min_filter_count_x4q, min_filter_length_x4q
    mov                    filter2q, filter1q
    add                    filter2q, filter_alloc_x4q
%endif
%ifidn %1, int16
    mova                         m0, m4
    mova                         m2, m4
%else ; float/double
    xorps                        m0, m0, m0
    xorps                        m2, m2, m2
%endif

    align 16
.inner_loop:
    movu                         m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
%if cpuflag(xop)
    vpmadcswd                    m2, m1, [filter2q+min_filter_count_x4q*1], m2
    vpmadcswd                    m0, m1, [filter1q+min_filter_count_x4q*1], m0
%else
    pmaddwd                      m3, m1, [filter2q+min_filter_count_x4q*1]
    pmaddwd                      m1, [filter1q+min_filter_count_x4q*1]
    paddd                        m2, m3
    paddd                        m0, m1
%endif ; cpuflag
%else ; float/double
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4                     m2, m1, [filter2q+min_filter_count_x4q*1], m2
    fmaddp%4                     m0, m1, [filter1q+min_filter_count_x4q*1], m0
%else
    mulp%4                       m3, m1, [filter2q+min_filter_count_x4q*1]
    mulp%4                       m1, m1, [filter1q+min_filter_count_x4q*1]
    addp%4                       m2, m2, m3
    addp%4                       m0, m0, m1
%endif ; cpuflag
%endif
    add        min_filter_count_x4q, mmsize
    js .inner_loop

%ifidn %1, int16
%if mmsize == 16
%if cpuflag(xop)
    vphadddq                     m2, m2
    vphadddq                     m0, m0
%endif
    pshufd                       m3, m2, q0032
    pshufd                       m1, m0, q0032
    paddd                        m2, m3
    paddd                        m0, m1
%endif
%if notcpuflag(xop)
    PSHUFLW                      m3, m2, q0032
    PSHUFLW                      m1, m0, q0032
    paddd                        m2, m3
    paddd                        m0, m1
%endif
    psubd                        m2, m0
    ; This is probably a really bad idea on atom and other machines with a
    ; long transfer latency between GPRs and XMMs (atom). However, it does
    ; make the clip a lot simpler...
    movd                        eax, m2
    add                      indexd, dst_incr_divd
    imul                        fracd
    idiv                        src_incrd
    movd                         m1, eax
    add                       fracd, dst_incr_modd
    paddd                        m0, m1
    psrad                        m0, 15
    packssdw                     m0, m0
    movd                     [dstq], m0

    ; note that for imul/idiv, I need to move filter to edx/eax for each:
    ; - 32bit: eax=r0[filter1], edx=r2[filter2]
    ; - win64: eax=r6[filter1], edx=r1[todo]
    ; - unix64: eax=r6[filter1], edx=r2[todo]
%else ; float/double
    ; val += (v2 - val) * (FELEML) frac / c->src_incr;
%if mmsize == 32
    vextractf128                xm1, m0, 0x1
    vextractf128                xm3, m2, 0x1
    addps                       xm0, xm1
    addps                       xm2, xm3
%endif
    cvtsi2s%4                   xm1, fracd
    subp%4                      xm2, xm0
    mulp%4                      xm1, xm4
    shufp%4                     xm1, xm1, q0000
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4                    xm0, xm2, xm1, xm0
%else
    mulp%4                      xm2, xm1
    addp%4                      xm0, xm2
%endif ; cpuflag

    ; horizontal sum & store
    movhlps                     xm1, xm0
%ifidn %1, float
    addps                       xm0, xm1
    shufps                      xm1, xm0, xm0, q0001
%endif
    add                       fracd, dst_incr_modd
    addp%4                      xm0, xm1
    add                      indexd, dst_incr_divd
    movs%4                   [dstq], xm0
%endif
    cmp                       fracd, src_incrd
    jl .skip
    sub                       fracd, src_incrd
    inc                      indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, phase_shift, index_incr, frac, index, dst, src
%endif

.skip:
%if ARCH_X86_32
    mov                phase_shiftd, phase_shift_stackd
%endif
    mov                 index_incrd, indexd
    add                        dstq, %2
    and                      indexd, phase_mask_stackd
    sar                 index_incrd, phase_shiftb
    lea                        srcq, [srcq+index_incrq*%2]
    cmp                        dstq, dst_endq
    jne .loop

%if UNIX64
    DEFINE_ARGS ctx, dst, filter2, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS ctx, filter2, src, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, ctx, update_context, frac, index, dst, src
%endif

    cmp dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the consumed
    ; number of bytes; however, we only use the value if update_context
    ; is true, so let's just leave it uninitialized otherwise
    mov                        ctxq, ctx_stackq
    movifnidn                   rax, srcq
    mov [ctxq+ResampleContext.frac], fracd
    sub                         rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr                         rax, %3

.skip_store:
%if ARCH_X86_32
    ADD                         rsp, 0x28
%endif
    RET
%endmacro
; Instantiate the kernels for every SIMD flavour; the HAVE_*_EXTERNAL gates
; are provided by the build system.
INIT_XMM sse
RESAMPLE_FNS float, 4, 2, s, pf_1

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif
%if HAVE_FMA4_EXTERNAL
INIT_XMM fma4
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif

%if ARCH_X86_32
INIT_MMX mmxext
RESAMPLE_FNS int16, 2, 1
%endif

INIT_XMM sse2
RESAMPLE_FNS int16, 2, 1
%if HAVE_XOP_EXTERNAL
INIT_XMM xop
RESAMPLE_FNS int16, 2, 1
%endif

INIT_XMM sse2
RESAMPLE_FNS double, 8, 3, d, pdbl_1