;*****************************************************************************
;* dct-64.asm: x86_64 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2016 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*          Laurent Aimar <fenrir@via.ecp.fr>
;*          Min Chen <chenm001.163.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pd_32
cextern pw_pixel_max
cextern pw_2
cextern pw_m2
cextern pw_32
cextern hsub_mul

; in: size, m0..m7, temp, temp
; out: m0..m7
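;
; A C-level sketch of the 1-D butterfly this macro performs, reconstructed
; from the s*/d*/a*/b* comments below (d0..d7 denote the eight input lanes;
; illustration only, not part of the build):
;     s07 = d0+d7;  s16 = d1+d6;  s25 = d2+d5;  s34 = d3+d4;
;     d07 = d0-d7;  d16 = d1-d6;  d25 = d2-d5;  d34 = d3-d4;
;     a0 = s07+s34;  a1 = s16+s25;  a2 = s07-s34;  a3 = s16-s25;
;     a4 = d16+d25 + (d07 + (d07>>1));
;     a5 = d07-d34 - (d25 + (d25>>1));
;     a6 = d07+d34 - (d16 + (d16>>1));
;     a7 = d16-d25 + (d34 + (d34>>1));
;     out0 =  a0+a1;        out4 =  a0-a1;
;     out2 =  a2+(a3>>1);   out6 = (a2>>1)-a3;
;     out1 =  a4+(a7>>2);   out7 = (a4>>2)-a7;
;     out3 =  a5+(a6>>2);   out5 =  a6-(a5>>2);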
%macro DCT8_1D 11
    SUMSUB_BA %1, %6, %5, %11 ; %6=s34, %5=d34
    SUMSUB_BA %1, %7, %4, %11 ; %7=s25, %4=d25
    SUMSUB_BA %1, %8, %3, %11 ; %8=s16, %3=d16
    SUMSUB_BA %1, %9, %2, %11 ; %9=s07, %2=d07

    SUMSUB_BA %1, %7, %8, %11 ; %7=a1, %8=a3
    SUMSUB_BA %1, %6, %9, %11 ; %6=a0, %9=a2

    psra%1 m%10, m%2, 1
    padd%1 m%10, m%2
    padd%1 m%10, m%3
    padd%1 m%10, m%4 ; %10=a4

    psra%1 m%11, m%5, 1
    padd%1 m%11, m%5
    padd%1 m%11, m%3
    psub%1 m%11, m%4 ; %11=a7

    SUMSUB_BA %1, %5, %2
    psub%1 m%2, m%4
    psub%1 m%5, m%3
    psra%1 m%4, 1
    psra%1 m%3, 1
    psub%1 m%2, m%4 ; %2=a5
    psub%1 m%5, m%3 ; %5=a6

    psra%1 m%3, m%11, 2
    padd%1 m%3, m%10 ; %3=b1
    psra%1 m%10, 2
    psub%1 m%10, m%11 ; %10=b7

    SUMSUB_BA %1, %7, %6, %11 ; %7=b0, %6=b4

    psra%1 m%4, m%8, 1
    padd%1 m%4, m%9 ; %4=b2
    psra%1 m%9, 1
    psub%1 m%9, m%8 ; %9=b6

    psra%1 m%8, m%5, 2
    padd%1 m%8, m%2 ; %8=b3
    psra%1 m%2, 2
    psub%1 m%5, m%2 ; %5=b5

    SWAP %2, %7, %5, %8, %9, %10
%endmacro

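; in: size, 8 rows of coefficients, temp, temp
; out: the 8 transformed rows, returned in the registers that were passed in
;      (the trailing SWAPs are compile-time renames). The %N numbers in the
;      comments below appear to predate the size argument and are off by one.
;
; A C-level sketch of the 1-D inverse butterfly, reconstructed from the code
; (d0..d7 denote the eight input lanes; illustration only):
;     a0 = d0+d4;              a2 = d0-d4;
;     a4 = (d2>>1)-d6;         a6 = (d6>>1)+d2;
;     a1 = -d3+d5-d7-(d7>>1);  a3 =  d1+d7-d3-(d3>>1);
;     a5 = -d1+d7+d5+(d5>>1);  a7 =  d3+d5+d1+(d1>>1);
;     b0 = a0+a6;  b2 = a2+a4;  b4 = a2-a4;  b6 = a0-a6;
;     b1 = a1+(a7>>2);  b3 = a3+(a5>>2);  b5 = (a3>>2)-a5;  b7 = a7-(a1>>2);
;     out0 = b0+b7;  out1 = b2+b5;  out2 = b4+b3;  out3 = b6+b1;
;     out4 = b6-b1;  out5 = b4-b3;  out6 = b2-b5;  out7 = b0-b7;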
%macro IDCT8_1D 11
    SUMSUB_BA %1, %6, %2, %10 ; %5=a0, %1=a2

    psra%1 m%10, m%3, 1
    padd%1 m%10, m%3
    padd%1 m%10, m%5
    padd%1 m%10, m%7 ; %9=a7

    psra%1 m%11, m%4, 1
    psub%1 m%11, m%8 ; %10=a4
    psra%1 m%8, 1
    padd%1 m%8, m%4 ; %7=a6

    psra%1 m%4, m%7, 1
    padd%1 m%4, m%7
    padd%1 m%4, m%9
    psub%1 m%4, m%3 ; %3=a5

    psub%1 m%3, m%5
    psub%1 m%7, m%5
    padd%1 m%3, m%9
    psub%1 m%7, m%9
    psra%1 m%5, 1
    psra%1 m%9, 1
    psub%1 m%3, m%5 ; %2=a3
    psub%1 m%7, m%9 ; %6=a1

    psra%1 m%5, m%10, 2
    padd%1 m%5, m%7 ; %4=b1
    psra%1 m%7, 2
    psub%1 m%10, m%7 ; %9=b7

    SUMSUB_BA %1, %8, %6, %7 ; %7=b0, %5=b6
    SUMSUB_BA %1, %11, %2, %7 ; %10=b2, %1=b4

    psra%1 m%9, m%4, 2
    padd%1 m%9, m%3 ; %8=b3
    psra%1 m%3, 2
    psub%1 m%3, m%4 ; %2=b5

    SUMSUB_BA %1, %10, %8, %7 ; %9=c0, %7=c7
    SUMSUB_BA %1, %3, %11, %7 ; %2=c1, %10=c6
    SUMSUB_BA %1, %9, %2, %7 ; %8=c2, %1=c5
    SUMSUB_BA %1, %5, %6, %7 ; %4=c3, %5=c4

    SWAP %11, %4
    SWAP %2, %10, %7
    SWAP %4, %9, %8
%endmacro

%if HIGH_BIT_DEPTH

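; High bit depth: the first 1-D pass is done on 16-bit differences; the halves
; are then transposed, sign-extended to 32 bits with WIDEN_SXWD, and the second
; pass runs in dwords so the wider intermediates cannot overflow. The stores
; below interleave the two widened halves into the 32-bit coefficient array.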
%macro SUB8x8_DCT8 0
cglobal sub8x8_dct8, 3,3,14
    TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
    LOAD_DIFF8x4 0,1,2,3, none,none, r1, r2
    LOAD_DIFF8x4 4,5,6,7, none,none, r1, r2

    DCT8_1D w, 0,1,2,3,4,5,6,7, 8,9

    TRANSPOSE4x4W 0,1,2,3,8
    WIDEN_SXWD 0,8
    WIDEN_SXWD 1,9
    WIDEN_SXWD 2,10
    WIDEN_SXWD 3,11
    DCT8_1D d, 0,8,1,9,2,10,3,11, 12,13
    mova [r0+0x00], m0
    mova [r0+0x20], m8
    mova [r0+0x40], m1
    mova [r0+0x60], m9
    mova [r0+0x80], m2
    mova [r0+0xA0], m10
    mova [r0+0xC0], m3
    mova [r0+0xE0], m11

    TRANSPOSE4x4W 4,5,6,7,0
    WIDEN_SXWD 4,0
    WIDEN_SXWD 5,1
    WIDEN_SXWD 6,2
    WIDEN_SXWD 7,3
    DCT8_1D d,4,0,5,1,6,2,7,3, 8,9
    mova [r0+0x10], m4
    mova [r0+0x30], m0
    mova [r0+0x50], m5
    mova [r0+0x70], m1
    mova [r0+0x90], m6
    mova [r0+0xB0], m2
    mova [r0+0xD0], m7
    mova [r0+0xF0], m3
    ret
%endmacro ; SUB8x8_DCT8

INIT_XMM sse2
SUB8x8_DCT8
INIT_XMM sse4
SUB8x8_DCT8
INIT_XMM avx
SUB8x8_DCT8

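; High bit depth: coefficients are 32-bit, so the inverse transform runs
; entirely in dwords across all 16 XMM registers. pd_32 supplies the rounding
; term for the final >>6, and STORE_DIFF adds the result to the destination
; pixels, clamping to [0, pw_pixel_max] (bounds passed in m8/m9).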
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2,16
    add r1, 128
    TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
    mova m0, [r1-128]
    mova m1, [r1-96]
    mova m2, [r1-64]
    mova m3, [r1-32]
    mova m4, [r1+ 0]
    mova m5, [r1+32]
    mova m6, [r1+64]
    mova m7, [r1+96]
    IDCT8_1D d,0,1,2,3,4,5,6,7,8,9
    TRANSPOSE4x4D 0,1,2,3,8
    TRANSPOSE4x4D 4,5,6,7,8
    paddd m0, [pd_32]
    paddd m4, [pd_32]
    mova [r1+64], m6
    mova [r1+96], m7
    mova m8, [r1-112]
    mova m9, [r1-80]
    mova m10, [r1-48]
    mova m11, [r1-16]
    mova m12, [r1+16]
    mova m13, [r1+48]
    mova m14, [r1+80]
    mova m15, [r1+112]
    IDCT8_1D d,8,9,10,11,12,13,14,15,6,7
    TRANSPOSE4x4D 8,9,10,11,6
    TRANSPOSE4x4D 12,13,14,15,6
    IDCT8_1D d,0,1,2,3,8,9,10,11,6,7
    mova [r1-112], m8
    mova [r1-80], m9
    mova m6, [r1+64]
    mova m7, [r1+96]
    IDCT8_1D d,4,5,6,7,12,13,14,15,8,9
    pxor m8, m8
    mova m9, [pw_pixel_max]
    STORE_DIFF m0, m4, m8, m9, [r0+0*FDEC_STRIDEB]
    STORE_DIFF m1, m5, m8, m9, [r0+1*FDEC_STRIDEB]
    STORE_DIFF m2, m6, m8, m9, [r0+2*FDEC_STRIDEB]
    STORE_DIFF m3, m7, m8, m9, [r0+3*FDEC_STRIDEB]
    mova m0, [r1-112]
    mova m1, [r1-80]
    STORE_DIFF m0, m12, m8, m9, [r0+4*FDEC_STRIDEB]
    STORE_DIFF m1, m13, m8, m9, [r0+5*FDEC_STRIDEB]
    STORE_DIFF m10, m14, m8, m9, [r0+6*FDEC_STRIDEB]
    STORE_DIFF m11, m15, m8, m9, [r0+7*FDEC_STRIDEB]
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

%else ; !HIGH_BIT_DEPTH

%macro DCT_SUB8 0
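;-----------------------------------------------------------------------------
; void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
; (prototype comment added by analogy with add8x8_idct below; the 8x8
;  difference pix1-pix2 is transformed as four 4x4 blocks)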
cglobal sub8x8_dct, 3,3,10
    add r2, 4*FDEC_STRIDE
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
    SWAP 7, 9
    LOAD_DIFF8x4 0, 1, 2, 3, 8, 9, r1, r2-4*FDEC_STRIDE
    LOAD_DIFF8x4 4, 5, 6, 7, 8, 9, r1, r2-4*FDEC_STRIDE
    DCT4_1D 0, 1, 2, 3, 8
    TRANSPOSE2x4x4W 0, 1, 2, 3, 8
    DCT4_1D 4, 5, 6, 7, 8
    TRANSPOSE2x4x4W 4, 5, 6, 7, 8
    DCT4_1D 0, 1, 2, 3, 8
    STORE_DCT 0, 1, 2, 3, r0, 0
    DCT4_1D 4, 5, 6, 7, 8
    STORE_DCT 4, 5, 6, 7, r0, 64
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
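; (computes the 8x8 transform of pix1-pix2: two DCT8_1D passes with an 8x8 word
;  transpose in between, then 64 int16 coefficients are stored at dct)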
cglobal sub8x8_dct8, 3,3,11
    add r2, 4*FDEC_STRIDE
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
    SWAP 7, 10
    LOAD_DIFF8x4 0, 1, 2, 3, 4, 10, r1, r2-4*FDEC_STRIDE
    LOAD_DIFF8x4 4, 5, 6, 7, 8, 10, r1, r2-4*FDEC_STRIDE
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    movdqa [r0+0x00], m0
    movdqa [r0+0x10], m1
    movdqa [r0+0x20], m2
    movdqa [r0+0x30], m3
    movdqa [r0+0x40], m4
    movdqa [r0+0x50], m5
    movdqa [r0+0x60], m6
    movdqa [r0+0x70], m7
    ret
%endmacro

INIT_XMM sse2
%define movdqa movaps
%define punpcklqdq movlhps
DCT_SUB8
%undef movdqa
%undef punpcklqdq
INIT_XMM ssse3
DCT_SUB8
INIT_XMM avx
DCT_SUB8
INIT_XMM xop
DCT_SUB8

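; sub16x16_dct8: each ymm register holds a 16-pixel-wide row of differences,
; i.e. two horizontally adjacent 8x8 blocks, one per 128-bit lane, so a single
; DCT8_1D/TRANSPOSE8x8W/DCT8_1D sequence transforms a 16x8 half. The low (left
; block) lanes are stored at r0-0x80 and the high (right block) lanes at r0;
; the 16x16 macroblock is handled as two such 16x8 calls.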
INIT_YMM avx2
cglobal sub16x16_dct8, 3,3,10
    add r0, 128
    add r2, 4*FDEC_STRIDE
    call .sub16x8_dct8
    add r0, 256
    add r1, FENC_STRIDE*8
    add r2, FDEC_STRIDE*8
    call .sub16x8_dct8
    RET
.sub16x8_dct8:
    LOAD_DIFF16x2_AVX2 0, 1, 2, 3, 0, 1
    LOAD_DIFF16x2_AVX2 2, 3, 4, 5, 2, 3
    LOAD_DIFF16x2_AVX2 4, 5, 6, 7, 4, 5
    LOAD_DIFF16x2_AVX2 6, 7, 8, 9, 6, 7
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    mova [r0-0x80+0x00], xm0
    vextracti128 [r0+0x00], m0, 1
    mova [r0-0x80+0x10], xm1
    vextracti128 [r0+0x10], m1, 1
    mova [r0-0x80+0x20], xm2
    vextracti128 [r0+0x20], m2, 1
    mova [r0-0x80+0x30], xm3
    vextracti128 [r0+0x30], m3, 1
    mova [r0-0x80+0x40], xm4
    vextracti128 [r0+0x40], m4, 1
    mova [r0-0x80+0x50], xm5
    vextracti128 [r0+0x50], m5, 1
    mova [r0-0x80+0x60], xm6
    vextracti128 [r0+0x60], m6, 1
    mova [r0-0x80+0x70], xm7
    vextracti128 [r0+0x70], m7, 1
    ret

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
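; (inverse of sub8x8_dct8: two IDCT8_1D passes with a transpose in between,
;  pw_32 rounding for the final >>6, then DIFFx2/STORE_IDCT add the result to
;  the destination pixels)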
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2,11
    add r0, 4*FDEC_STRIDE
    pxor m7, m7
    TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
    SWAP 7, 9
    movdqa m0, [r1+0x00]
    movdqa m1, [r1+0x10]
    movdqa m2, [r1+0x20]
    movdqa m3, [r1+0x30]
    movdqa m4, [r1+0x40]
    movdqa m5, [r1+0x50]
    movdqa m6, [r1+0x60]
    movdqa m7, [r1+0x70]
    IDCT8_1D w,0,1,2,3,4,5,6,7,8,10
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    paddw m0, [pw_32] ; rounding for the >>6 at the end
    IDCT8_1D w,0,1,2,3,4,5,6,7,8,10
    DIFFx2 m0, m1, m8, m9, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]
    DIFFx2 m2, m3, m8, m9, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]
    DIFFx2 m4, m5, m8, m9, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]
    DIFFx2 m6, m7, m8, m9, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]
    STORE_IDCT m1, m3, m5, m7
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
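; (four 4x4 inverse transforms: SBUTTERFLY interleaves rows from two 4x4 blocks
;  into each register, so every IDCT4_1D call handles a pair of blocks, and the
;  results are added to the 8x8 destination block)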
%macro ADD8x8 0
cglobal add8x8_idct, 2,2,11
    add r0, 4*FDEC_STRIDE
    pxor m7, m7
    TAIL_CALL .skip_prologue, 0
global current_function %+ .skip_prologue
.skip_prologue:
    SWAP 7, 9
    mova m0, [r1+ 0]
    mova m2, [r1+16]
    mova m1, [r1+32]
    mova m3, [r1+48]
    SBUTTERFLY qdq, 0, 1, 4
    SBUTTERFLY qdq, 2, 3, 4
    mova m4, [r1+64]
    mova m6, [r1+80]
    mova m5, [r1+96]
    mova m7, [r1+112]
    SBUTTERFLY qdq, 4, 5, 8
    SBUTTERFLY qdq, 6, 7, 8
    IDCT4_1D w,0,1,2,3,8,10
    TRANSPOSE2x4x4W 0,1,2,3,8
    IDCT4_1D w,4,5,6,7,8,10
    TRANSPOSE2x4x4W 4,5,6,7,8
    paddw m0, [pw_32]
    IDCT4_1D w,0,1,2,3,8,10
    paddw m4, [pw_32]
    IDCT4_1D w,4,5,6,7,8,10
    DIFFx2 m0, m1, m8, m9, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]
    DIFFx2 m2, m3, m8, m9, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]
    DIFFx2 m4, m5, m8, m9, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]
    DIFFx2 m6, m7, m8, m9, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]
    STORE_IDCT m1, m3, m5, m7
    ret
%endmacro ; ADD8x8

INIT_XMM sse2
ADD8x8
INIT_XMM avx
ADD8x8

%endif ; !HIGH_BIT_DEPTH