;*****************************************************************************
;* dct-32.asm: x86_32 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2016 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*          Laurent Aimar <fenrir@via.ecp.fr>
;*          Min Chen <chenm001@163.com>
;*          Christian Heine <sennindemokrit@gmx.net>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pd_32
cextern pw_pixel_max
cextern pw_2
cextern pw_m2
cextern pw_32
cextern hsub_mul

%macro SPILL_SHUFFLE 3-* ; ptr, list of regs, list of memory offsets
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova [%%base + %2*16], %%tmp
    %rotate 1-%0/2
    %endrep
%endmacro

%macro UNSPILL_SHUFFLE 3-*
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova %%tmp, [%%base + %2*16]
    %rotate 1-%0/2
    %endrep
%endmacro

%macro SPILL 2+ ; assume offsets are the same as reg numbers
    SPILL_SHUFFLE %1, %2, %2
%endmacro

%macro UNSPILL 2+
    UNSPILL_SHUFFLE %1, %2, %2
%endmacro
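
; Usage sketch (illustrative values, not from the original source):
;   SPILL_SHUFFLE r0, 1,2, 5,6   ; m1 -> [r0+0x50], m2 -> [r0+0x60]
;   SPILL         r0, 1,2        ; m1 -> [r0+0x10], m2 -> [r0+0x20]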

; in:  size, m0..m7
; out: 0,4,6 in memory at %10,%11,%12, rest in regs
%macro DCT8_1D 12
    SUMSUB_BA %1, %9, %2      ; %9 = s07,  %2 = d07
    SUMSUB_BA %1, %8, %3      ; %8 = s16,  %3 = d16
    SUMSUB_BA %1, %7, %4      ; %7 = s25,  %4 = d25
    SUMSUB_BA %1, %6, %5      ; %6 = s34,  %5 = d34
    SUMSUB_BA %1, %6, %9      ; %6 = a0,   %9 = a2
    SUMSUB_BA %1, %7, %8      ; %7 = a1,   %8 = a3
    SUMSUB_BA %1, %7, %6      ; %7 = dst0, %6 = dst4
    mova    %10, m%7
    mova    %11, m%6
    psra%1  m%7, m%8, 1       ; a3>>1
    padd%1  m%7, m%9          ; a2 + (a3>>1)
    psra%1  m%9, 1            ; a2>>1
    psub%1  m%9, m%8          ; (a2>>1) - a3
    mova    %12, m%9
    psra%1  m%6, m%4, 1
    padd%1  m%6, m%4          ; d25+(d25>>1)
    psub%1  m%8, m%2, m%5     ; a5 = d07-d34-(d25+(d25>>1))
    psub%1  m%8, m%6
    psra%1  m%6, m%3, 1
    padd%1  m%6, m%3          ; d16+(d16>>1)
    padd%1  m%9, m%2, m%5
    psub%1  m%9, m%6          ; a6 = d07+d34-(d16+(d16>>1))
    psra%1  m%6, m%2, 1
    padd%1  m%6, m%2          ; d07+(d07>>1)
    padd%1  m%6, m%3
    padd%1  m%6, m%4          ; a4 = d16+d25+(d07+(d07>>1))
    psra%1  m%2, m%5, 1
    padd%1  m%2, m%5          ; d34+(d34>>1)
    padd%1  m%2, m%3
    psub%1  m%2, m%4          ; a7 = d16-d25+(d34+(d34>>1))
    psra%1  m%5, m%2, 2
    padd%1  m%5, m%6          ; a4 + (a7>>2)
    psra%1  m%4, m%9, 2
    padd%1  m%4, m%8          ; a5 + (a6>>2)
    psra%1  m%6, 2
    psra%1  m%8, 2
    psub%1  m%6, m%2          ; (a4>>2) - a7
    psub%1  m%9, m%8          ; a6 - (a5>>2)
    SWAP %3, %5, %4, %7, %9, %6
%endmacro
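
; Scalar sketch of the butterfly above, reconstructed from the per-step
; comments (same shape as x264's C reference); names are illustrative:
;   s07 = src0+src7; d07 = src0-src7;  (likewise s16/d16, s25/d25, s34/d34)
;   a0 = s07+s34;  a1 = s16+s25;  a2 = s07-s34;  a3 = s16-s25;
;   a4 = d16+d25+(d07+(d07>>1));  a5 = d07-d34-(d25+(d25>>1));
;   a6 = d07+d34-(d16+(d16>>1));  a7 = d16-d25+(d34+(d34>>1));
;   dst0 = a0+a1;        dst4 = a0-a1;
;   dst2 = a2+(a3>>1);   dst6 = (a2>>1)-a3;
;   dst1 = a4+(a7>>2);   dst7 = (a4>>2)-a7;
;   dst3 = a5+(a6>>2);   dst5 = a6-(a5>>2);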

; in:  size, m[1,2,3,5,6,7], 0,4 in mem at %10,%11
; out: m0..m7
%macro IDCT8_1D 11
    psra%1  m%2, m%4, 1
    psra%1  m%6, m%8, 1
    psub%1  m%2, m%8
    padd%1  m%6, m%4
    psra%1  m%8, m%3, 1
    padd%1  m%8, m%3
    padd%1  m%8, m%5
    padd%1  m%8, m%7
    psra%1  m%4, m%7, 1
    padd%1  m%4, m%7
    padd%1  m%4, m%9
    psub%1  m%4, m%3
    psub%1  m%3, m%5
    psub%1  m%7, m%5
    padd%1  m%3, m%9
    psub%1  m%7, m%9
    psra%1  m%5, 1
    psra%1  m%9, 1
    psub%1  m%3, m%5
    psub%1  m%7, m%9
    psra%1  m%5, m%8, 2
    psra%1  m%9, m%4, 2
    padd%1  m%5, m%7
    padd%1  m%9, m%3
    psra%1  m%7, 2
    psra%1  m%3, 2
    psub%1  m%8, m%7
    psub%1  m%3, m%4
    mova    m%4, %10
    mova    m%7, %11
    SUMSUB_BA %1, %7, %4
    SUMSUB_BA %1, %6, %7
    SUMSUB_BA %1, %2, %4
    SUMSUB_BA %1, %8, %6
    SUMSUB_BA %1, %3, %2
    SUMSUB_BA %1, %9, %4
    SUMSUB_BA %1, %5, %7
    SWAP %2, %4
    SWAP %6, %8
    SWAP %2, %6, %7
    SWAP %4, %9, %8
%endmacro
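
; Scalar sketch of the 8-point inverse transform implemented above (the
; H.264 8x8 IDCT); a reference outline with names chosen here, offered as
; documentation rather than taken verbatim from this file:
;   a0 = d0+d4;              a2 = d0-d4;
;   a4 = (d2>>1)-d6;         a6 = (d6>>1)+d2;
;   a1 = -d3+d5-d7-(d7>>1);  a3 = d1+d7-d3-(d3>>1);
;   a5 = -d1+d7+d5+(d5>>1);  a7 = d3+d5+d1+(d1>>1);
;   b0 = a0+a6;  b2 = a2+a4;  b4 = a2-a4;  b6 = a0-a6;
;   b1 = (a7>>2)+a1;  b3 = a3+(a5>>2);  b5 = (a3>>2)-a5;  b7 = a7-(a1>>2);
;   dst0 = b0+b7;  dst1 = b2+b5;  dst2 = b4+b3;  dst3 = b6+b1;
;   dst4 = b6-b1;  dst5 = b4-b3;  dst6 = b2-b5;  dst7 = b0-b7;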

%if HIGH_BIT_DEPTH

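;-----------------------------------------------------------------------------
; void sub8x8_dct8( dctcoef dct[8][8], pixel *pix1, pixel *pix2 )
; (high bit depth: dctcoef is 32-bit, pixel is 16-bit; signature inferred
;  from the 8-bit version below)
;-----------------------------------------------------------------------------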
%macro SUB8x8_DCT8 0
cglobal sub8x8_dct8, 3,3,8
global current_function %+ .skip_prologue
.skip_prologue:
    LOAD_DIFF8x4 0,1,2,3, none,none, r1, r2
    LOAD_DIFF8x4 4,5,6,7, none,none, r1, r2

    DCT8_1D w, 0,1,2,3,4,5,6,7, [r0],[r0+0x10],[r0+0x50]
    mova         m0, [r0]

    mova  [r0+0x30], m5
    mova  [r0+0x70], m7
    TRANSPOSE4x4W 0,1,2,3,4
    WIDEN_SXWD 0,4
    WIDEN_SXWD 1,5
    WIDEN_SXWD 2,6
    WIDEN_SXWD 3,7
    DCT8_1D d, 0,4,1,5,2,6,3,7, [r0],[r0+0x80],[r0+0xC0]
    mova  [r0+0x20], m4
    mova  [r0+0x40], m1
    mova  [r0+0x60], m5
    mova  [r0+0xA0], m6
    mova  [r0+0xE0], m7
    mova         m4, [r0+0x10]
    mova         m5, [r0+0x30]
    mova         m6, [r0+0x50]
    mova         m7, [r0+0x70]

    TRANSPOSE4x4W 4,5,6,7,0
    WIDEN_SXWD 4,0
    WIDEN_SXWD 5,1
    WIDEN_SXWD 6,2
    WIDEN_SXWD 7,3
    DCT8_1D d, 4,0,5,1,6,2,7,3, [r0+0x10],[r0+0x90],[r0+0xD0]
    mova  [r0+0x30], m0
    mova  [r0+0x50], m5
    mova  [r0+0x70], m1
    mova  [r0+0xB0], m2
    mova  [r0+0xF0], m3
    ret
%endmacro ; SUB8x8_DCT8

INIT_XMM sse2
SUB8x8_DCT8
INIT_XMM sse4
SUB8x8_DCT8
INIT_XMM avx
SUB8x8_DCT8

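;-----------------------------------------------------------------------------
; void add8x8_idct8( pixel *pix, dctcoef dct[8][8] )
; (signature inferred from the 8-bit version below)
;-----------------------------------------------------------------------------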
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2
    add r1, 128   ; bias r1 to the middle of dct[] so every offset below fits in a byte displacement
global current_function %+ .skip_prologue
.skip_prologue:
    UNSPILL_SHUFFLE r1, 1,2,3,5,6,7, -6,-4,-2,2,4,6
    IDCT8_1D d, 0,1,2,3,4,5,6,7, [r1-128],[r1+0]
    mova [r1+0], m4
    TRANSPOSE4x4D 0,1,2,3,4
    paddd m0, [pd_32]
    mova  m4, [r1+0]
    SPILL_SHUFFLE r1, 0,1,2,3, -8,-6,-4,-2
    TRANSPOSE4x4D 4,5,6,7,3
    paddd m4, [pd_32]
    SPILL_SHUFFLE   r1, 4,5,6,7, 0,2,4,6
    UNSPILL_SHUFFLE r1, 1,2,3,5,6,7, -5,-3,-1,3,5,7
    IDCT8_1D d, 0,1,2,3,4,5,6,7, [r1-112],[r1+16]
    mova [r1+16], m4
    TRANSPOSE4x4D 0,1,2,3,4
    mova  m4, [r1+16]
    mova [r1-112], m0
    TRANSPOSE4x4D 4,5,6,7,0
    SPILL_SHUFFLE   r1, 4,5,6,7, 1,3,5,7
    UNSPILL_SHUFFLE r1, 5,6,7, -6,-4,-2
    IDCT8_1D d, 4,5,6,7,0,1,2,3, [r1-128],[r1-112]
    SPILL_SHUFFLE   r1, 4,5,6,7,0,1,2,3, -8,-7,-6,-5,-4,-3,-2,-1
    UNSPILL_SHUFFLE r1, 1,2,3,5,6,7, 2,4,6,3,5,7
    IDCT8_1D d, 0,1,2,3,4,5,6,7, [r1+0],[r1+16]
    SPILL_SHUFFLE r1, 7,6,5, 7,6,5
    mova m7, [pw_pixel_max]
    pxor m6, m6
    mova m5, [r1-128]
    STORE_DIFF m5, m0, m6, m7, [r0+0*FDEC_STRIDEB]
    mova m0, [r1-112]
    STORE_DIFF m0, m1, m6, m7, [r0+1*FDEC_STRIDEB]
    mova m0, [r1-96]
    STORE_DIFF m0, m2, m6, m7, [r0+2*FDEC_STRIDEB]
    mova m0, [r1-80]
    STORE_DIFF m0, m3, m6, m7, [r0+3*FDEC_STRIDEB]
    mova m0, [r1-64]
    STORE_DIFF m0, m4, m6, m7, [r0+4*FDEC_STRIDEB]
    mova m0, [r1-48]
    mova m1, [r1+80]
    STORE_DIFF m0, m1, m6, m7, [r0+5*FDEC_STRIDEB]
    mova m0, [r1-32]
    mova m1, [r1+96]
    STORE_DIFF m0, m1, m6, m7, [r0+6*FDEC_STRIDEB]
    mova m0, [r1-16]
    mova m1, [r1+112]
    STORE_DIFF m0, m1, m6, m7, [r0+7*FDEC_STRIDEB]
    RET
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

%else ; !HIGH_BIT_DEPTH

INIT_MMX
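; Compute pix1 - pix2 for eight rows of four pixels into m0..m7, spilling
; m0 to [r0] so a scratch register is free for the final row.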
ALIGN 16
load_diff_4x8_mmx:
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    movq [r0], m0
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    movq m0, [r0]
    ret

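; 1-D 8-point DCT on m0..m7; per DCT8_1D, result rows 0, 4 and 6 are
; written to [r0], [r0+0x40] and [r0+0x60], the rest stay in registers.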
cglobal dct8_mmx
    DCT8_1D w, 0,1,2,3,4,5,6,7, [r0],[r0+0x40],[r0+0x60]
    SAVE_MM_PERMUTATION
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8_mmx, 3,3
global sub8x8_dct8_mmx.skip_prologue
.skip_prologue:
    RESET_MM_PERMUTATION
    call load_diff_4x8_mmx
    call dct8_mmx
    UNSPILL r0, 0
    TRANSPOSE4x4W 0,1,2,3,4
    SPILL r0, 0,1,2,3
    UNSPILL r0, 4,6
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r0, 4,5,6,7
    RESET_MM_PERMUTATION
    add  r1, 4
    add  r2, 4
    add  r0, 8
    call load_diff_4x8_mmx
    sub  r1, 4
    sub  r2, 4
    call dct8_mmx
    sub  r0, 8
    UNSPILL r0+8, 4,6
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r0+8, 4,5,6,7
    UNSPILL r0+8, 0
    TRANSPOSE4x4W 0,1,2,3,5
    UNSPILL r0, 4,5,6,7
    SPILL_SHUFFLE r0, 0,1,2,3, 4,5,6,7
    movq mm4, m6 ; depends on the permutation to not produce conflicts
    movq mm0, m4
    movq mm1, m5
    movq mm2, mm4
    movq mm3, m7
    RESET_MM_PERMUTATION
    UNSPILL r0+8, 4,5,6,7
    add  r0, 8
    call dct8_mmx
    sub  r0, 8
    SPILL r0+8, 1,2,3,5,7
    RESET_MM_PERMUTATION
    UNSPILL r0, 0,1,2,3,4,5,6,7
    call dct8_mmx
    SPILL r0, 1,2,3,5,7
    ret

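; 1-D 8-point IDCT; per IDCT8_1D, inputs 0 and 4 are read from [r1+0]
; and [r1+64], the remaining rows from m1..m7.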
cglobal idct8_mmx
    IDCT8_1D w, 0,1,2,3,4,5,6,7, [r1+0],[r1+64]
    SAVE_MM_PERMUTATION
    ret

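; Add two 4-coefficient halves of 16-bit residual (%2, %3) to the packed
; 8-pixel row %1 of dst, with unsigned saturation; m0 must hold zero.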
%macro ADD_STORE_ROW 3
    movq      m1, [r0+%1*FDEC_STRIDE]
    punpckhbw m2, m1, m0
    punpcklbw m1, m0
    paddw     m1, %2
    paddw     m2, %3
    packuswb  m1, m2
    movq [r0+%1*FDEC_STRIDE], m1
%endmacro

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal add8x8_idct8_mmx, 2,2
global add8x8_idct8_mmx.skip_prologue
.skip_prologue:
    INIT_MMX
    add word [r1], 32   ; fold the +32 rounding bias (for the final >>6) into the DC coefficient
    UNSPILL r1, 1,2,3,5,6,7
    call idct8_mmx
    SPILL r1, 7
    TRANSPOSE4x4W 0,1,2,3,7
    SPILL r1, 0,1,2,3
    UNSPILL r1, 7
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r1, 4,5,6,7
    INIT_MMX
    UNSPILL r1+8, 1,2,3,5,6,7
    add  r1, 8
    call idct8_mmx
    sub  r1, 8
    SPILL r1+8, 7
    TRANSPOSE4x4W 0,1,2,3,7
    SPILL r1+8, 0,1,2,3
    UNSPILL r1+8, 7
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r1+8, 4,5,6,7
    INIT_MMX
    movq m3, [r1+0x08]
    movq m0, [r1+0x40]
    movq [r1+0x40], m3
    movq [r1+0x08], m0
    ; memory layout at this time:
    ; A0------ A1------
    ; B0------ F0------
    ; C0------ G0------
    ; D0------ H0------
    ; E0------ E1------
    ; B1------ F1------
    ; C1------ G1------
    ; D1------ H1------
    UNSPILL_SHUFFLE r1, 1,2,3, 5,6,7
    UNSPILL r1+8, 5,6,7
    add  r1, 8
    call idct8_mmx
    sub  r1, 8
    psraw m0, 6
    psraw m1, 6
    psraw m2, 6
    psraw m3, 6
    psraw m4, 6
    psraw m5, 6
    psraw m6, 6
    psraw m7, 6
    movq [r1+0x08], m0 ; mm4
    movq [r1+0x48], m4 ; mm5
    movq [r1+0x58], m5 ; mm0
    movq [r1+0x68], m6 ; mm2
    movq [r1+0x78], m7 ; mm6
    movq mm5, [r1+0x18]
    movq mm6, [r1+0x28]
    movq [r1+0x18], m1 ; mm1
    movq [r1+0x28], m2 ; mm7
    movq mm7, [r1+0x38]
    movq [r1+0x38], m3 ; mm3
    movq mm1, [r1+0x10]
    movq mm2, [r1+0x20]
    movq mm3, [r1+0x30]
    call idct8_mmx
    psraw m0, 6
    psraw m1, 6
    psraw m2, 6
    psraw m3, 6
    psraw m4, 6
    psraw m5, 6
    psraw m6, 6
    psraw m7, 6
    SPILL r1, 0,1,2
    pxor m0, m0
    ADD_STORE_ROW 0, [r1+0x00], [r1+0x08]
    ADD_STORE_ROW 1, [r1+0x10], [r1+0x18]
    ADD_STORE_ROW 2, [r1+0x20], [r1+0x28]
    ADD_STORE_ROW 3, m3, [r1+0x38]
    ADD_STORE_ROW 4, m4, [r1+0x48]
    ADD_STORE_ROW 5, m5, [r1+0x58]
    ADD_STORE_ROW 6, m6, [r1+0x68]
    ADD_STORE_ROW 7, m7, [r1+0x78]
    ret

%macro DCT_SUB8 0
cglobal sub8x8_dct, 3,3
    add r2, 4*FDEC_STRIDE
global current_function %+ .skip_prologue
.skip_prologue:
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    LOAD_DIFF8x4 0, 1, 2, 3, 6, 7, r1, r2-4*FDEC_STRIDE
    SPILL r0, 1,2
    SWAP 2, 7
    LOAD_DIFF8x4 4, 5, 6, 7, 1, 2, r1, r2-4*FDEC_STRIDE
    UNSPILL r0, 1
    SPILL r0, 7
    SWAP 2, 7
    UNSPILL r0, 2
    DCT4_1D 0, 1, 2, 3, 7
    TRANSPOSE2x4x4W 0, 1, 2, 3, 7
    UNSPILL r0, 7
    SPILL r0, 2
    DCT4_1D 4, 5, 6, 7, 2
    TRANSPOSE2x4x4W 4, 5, 6, 7, 2
    UNSPILL r0, 2
    SPILL r0, 6
    DCT4_1D 0, 1, 2, 3, 6
    UNSPILL r0, 6
    STORE_DCT 0, 1, 2, 3, r0, 0
    DCT4_1D 4, 5, 6, 7, 3
    STORE_DCT 4, 5, 6, 7, r0, 64
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8, 3,3
    add r2, 4*FDEC_STRIDE
global current_function %+ .skip_prologue
.skip_prologue:
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
    LOAD_DIFF8x4 0, 1, 2, 3, 4, 7, r1, r2-4*FDEC_STRIDE
    SPILL r0, 0,1
    SWAP 1, 7
    LOAD_DIFF8x4 4, 5, 6, 7, 0, 1, r1, r2-4*FDEC_STRIDE
    UNSPILL r0, 0,1
%else
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2-4*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2-3*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2-2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2-1*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    SPILL r0, 0
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    UNSPILL r0, 0
%endif
    DCT8_1D w, 0,1,2,3,4,5,6,7, [r0],[r0+0x40],[r0+0x60]
    UNSPILL r0, 0,4
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7, [r0+0x60],[r0+0x40], 1
    UNSPILL r0, 4
    DCT8_1D w, 0,1,2,3,4,5,6,7, [r0],[r0+0x40],[r0+0x60]
    SPILL r0, 1,2,3,5,7
    ret
%endmacro

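; sse2 variant: movaps/movlhps encode one byte shorter than their integer
; counterparts movdqa/punpcklqdq and give bit-identical results here, so
; they are substituted below (code-size rationale assumed, not stated in
; the source).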
INIT_XMM sse2
%define movdqa movaps
%define punpcklqdq movlhps
DCT_SUB8
%undef movdqa
%undef punpcklqdq
INIT_XMM ssse3
DCT_SUB8
INIT_XMM avx
DCT_SUB8
INIT_XMM xop
DCT_SUB8

;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
%macro ADD8x8 0
cglobal add8x8_idct, 2,2
    add r0, 4*FDEC_STRIDE
global current_function %+ .skip_prologue
.skip_prologue:
    UNSPILL_SHUFFLE r1, 0,2,1,3, 0,1,2,3
    SBUTTERFLY qdq, 0, 1, 4
    SBUTTERFLY qdq, 2, 3, 4
    UNSPILL_SHUFFLE r1, 4,6,5,7, 4,5,6,7
    SPILL r1, 0
    SBUTTERFLY qdq, 4, 5, 0
    SBUTTERFLY qdq, 6, 7, 0
    UNSPILL r1, 0
    IDCT4_1D w, 0,1,2,3, r1
    SPILL r1, 4
    TRANSPOSE2x4x4W 0,1,2,3,4
    UNSPILL r1, 4
    IDCT4_1D w, 4,5,6,7, r1
    SPILL r1, 0
    TRANSPOSE2x4x4W 4,5,6,7,0
    UNSPILL r1, 0
    paddw m0, [pw_32]
    IDCT4_1D w, 0,1,2,3, r1
    paddw m4, [pw_32]
    IDCT4_1D w, 4,5,6,7, r1
    SPILL r1, 6,7
    pxor m7, m7
    DIFFx2 m0, m1, m6, m7, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]; m5
    DIFFx2 m2, m3, m6, m7, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]; m5
    UNSPILL_SHUFFLE r1, 0,2, 6,7
    DIFFx2 m4, m5, m6, m7, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]; m5
    DIFFx2 m0, m2, m6, m7, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]; m5
    STORE_IDCT m1, m3, m5, m2
    ret
%endmacro ; ADD8x8

INIT_XMM sse2
ADD8x8
INIT_XMM avx
ADD8x8

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2
    add r0, 4*FDEC_STRIDE
global current_function %+ .skip_prologue
.skip_prologue:
    UNSPILL r1, 1,2,3,5,6,7
    IDCT8_1D w, 0,1,2,3,4,5,6,7, [r1+0],[r1+64]
    SPILL r1, 6
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7, [r1+0x60],[r1+0x40], 1
    paddw m0, [pw_32]
    SPILL r1, 0
    IDCT8_1D w, 0,1,2,3,4,5,6,7, [r1+0],[r1+64]
    SPILL r1, 6,7
    pxor m7, m7
    DIFFx2 m0, m1, m6, m7, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]; m5
    DIFFx2 m2, m3, m6, m7, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]; m5
    UNSPILL_SHUFFLE r1, 0,2, 6,7
    DIFFx2 m4, m5, m6, m7, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]; m5
    DIFFx2 m0, m2, m6, m7, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]; m5
    STORE_IDCT m1, m3, m5, m2
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8
%endif ; !HIGH_BIT_DEPTH