// Copyright (c) 2023- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
// In other words, PPSSPP_ARCH(ARM64) || DISASM_ALL.
#if PPSSPP_ARCH(ARM64) || (PPSSPP_PLATFORM(WINDOWS) && !defined(__LIBRETRO__))

#ifndef offsetof
#include <cstddef>
#endif

#include "Core/MIPS/ARM64/Arm64IRJit.h"
#include "Core/MIPS/ARM64/Arm64IRRegCache.h"

// This file contains compilation for floating point related instructions.
//
// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non working ones should have DISABLE. No flags because that's in IR already.

// #define CONDITIONAL_DISABLE { CompIR_Generic(inst); return; }
#define CONDITIONAL_DISABLE {}
#define DISABLE { CompIR_Generic(inst); return; }
#define INVALIDOP { _assert_msg_(false, "Invalid IR inst %d", (int)inst.op); CompIR_Generic(inst); return; }

namespace MIPSComp {

using namespace Arm64Gen;
using namespace Arm64IRJitConstants;

void Arm64JitBackend::CompIR_FArith(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::FAdd:
		regs_.Map(inst);
		fp_.FADD(regs_.F(inst.dest), regs_.F(inst.src1), regs_.F(inst.src2));
		break;

	case IROp::FSub:
		regs_.Map(inst);
		fp_.FSUB(regs_.F(inst.dest), regs_.F(inst.src1), regs_.F(inst.src2));
		break;

	case IROp::FMul:
		regs_.Map(inst);
		fp_.FMUL(regs_.F(inst.dest), regs_.F(inst.src1), regs_.F(inst.src2));
		break;

	case IROp::FDiv:
		regs_.Map(inst);
		fp_.FDIV(regs_.F(inst.dest), regs_.F(inst.src1), regs_.F(inst.src2));
		break;

	case IROp::FSqrt:
		regs_.Map(inst);
		fp_.FSQRT(regs_.F(inst.dest), regs_.F(inst.src1));
		break;

	case IROp::FNeg:
		regs_.Map(inst);
		fp_.FNEG(regs_.F(inst.dest), regs_.F(inst.src1));
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_FAssign(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::FMov:
		if (inst.dest != inst.src1) {
			regs_.Map(inst);
			fp_.FMOV(regs_.F(inst.dest), regs_.F(inst.src1));
		}
		break;

	case IROp::FAbs:
		regs_.Map(inst);
		fp_.FABS(regs_.F(inst.dest), regs_.F(inst.src1));
		break;

	case IROp::FSign:
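		// Net effect: dest = 1.0f with src1's sign bit copied in, or 0.0f when src1 compares equal to zero.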
		regs_.Map(inst);
		// We'll need this flag later. Vector could use a temp and FCMEQ.
		fp_.FCMP(regs_.F(inst.src1));

		fp_.MOVI2FDUP(EncodeRegToDouble(SCRATCHF1), 1.0f);
		// Invert 0x80000000 -> 0x7FFFFFFF as a mask for sign.
		fp_.MVNI(32, EncodeRegToDouble(SCRATCHF2), 0x80, 24);
		// Keep the sign bit in dest, replace all other bits with those from 1.0f.
		if (inst.dest != inst.src1)
			fp_.FMOV(regs_.FD(inst.dest), regs_.FD(inst.src1));
		fp_.BIT(regs_.FD(inst.dest), EncodeRegToDouble(SCRATCHF1), EncodeRegToDouble(SCRATCHF2));

		// It's later now, let's replace with zero if that FCmp was EQ to zero.
		fp_.MOVI2FDUP(EncodeRegToDouble(SCRATCHF1), 0.0f);
		fp_.FCSEL(regs_.F(inst.dest), SCRATCHF1, regs_.F(inst.dest), CC_EQ);
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_FCompare(IRInst inst) {
	CONDITIONAL_DISABLE;

	constexpr IRReg IRREG_VFPU_CC = IRREG_VFPU_CTRL_BASE + VFPU_CTRL_CC;

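	// After FCMP, an unordered (NaN) comparison sets C and V, so CC_VS / CC_VC test for NaN.
	// CC_LO / CC_LS are ordered-only less-than / less-or-equal; CC_LT / CC_LE also match unordered.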
	switch (inst.op) {
	case IROp::FCmp:
		switch (inst.dest) {
		case IRFpCompareMode::False:
			regs_.SetGPRImm(IRREG_FPCOND, 0);
			break;

		case IRFpCompareMode::EitherUnordered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_VS);
			break;

		case IRFpCompareMode::EqualOrdered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_EQ);
			break;

		case IRFpCompareMode::EqualUnordered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_EQ);
			// If ordered, use the above result. If unordered, use ZR+1 (being 1.)
			CSINC(regs_.R(IRREG_FPCOND), regs_.R(IRREG_FPCOND), WZR, CC_VC);
			break;

		case IRFpCompareMode::LessEqualOrdered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_LS);
			break;

		case IRFpCompareMode::LessEqualUnordered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_LE);
			break;

		case IRFpCompareMode::LessOrdered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_LO);
			break;

		case IRFpCompareMode::LessUnordered:
			regs_.MapWithExtra(inst, { { 'G', IRREG_FPCOND, 1, MIPSMap::NOINIT } });
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(regs_.R(IRREG_FPCOND), CC_LT);
			break;

		default:
			_assert_msg_(false, "Unexpected IRFpCompareMode %d", inst.dest);
		}
		break;

	case IROp::FCmovVfpuCC:
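		// src2's low nibble picks which VFPU_CC bit to test; bit 7 picks whether the move
		// happens when that bit is set (1) or clear (0).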
		regs_.MapWithExtra(inst, { { 'G', IRREG_VFPU_CC, 1, MIPSMap::INIT } });
		TSTI2R(regs_.R(IRREG_VFPU_CC), 1ULL << (inst.src2 & 0xF));
		if ((inst.src2 >> 7) & 1) {
			fp_.FCSEL(regs_.F(inst.dest), regs_.F(inst.dest), regs_.F(inst.src1), CC_EQ);
		} else {
			fp_.FCSEL(regs_.F(inst.dest), regs_.F(inst.dest), regs_.F(inst.src1), CC_NEQ);
		}
		break;

	case IROp::FCmpVfpuBit:
		regs_.MapGPR(IRREG_VFPU_CC, MIPSMap::DIRTY);

		switch (VCondition(inst.dest & 0xF)) {
		case VC_EQ:
			regs_.Map(inst);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(SCRATCH1, CC_EQ);
			break;
		case VC_NE:
			regs_.Map(inst);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(SCRATCH1, CC_NEQ);
			break;
		case VC_LT:
			regs_.Map(inst);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(SCRATCH1, CC_LO);
			break;
		case VC_LE:
			regs_.Map(inst);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(SCRATCH1, CC_LS);
			break;
		case VC_GT:
			regs_.Map(inst);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(SCRATCH1, CC_GT);
			break;
		case VC_GE:
			regs_.Map(inst);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
			CSET(SCRATCH1, CC_GE);
			break;
		case VC_EZ:
			regs_.MapFPR(inst.src1);
			fp_.FCMP(regs_.F(inst.src1));
			CSET(SCRATCH1, CC_EQ);
			break;
		case VC_NZ:
			regs_.MapFPR(inst.src1);
			fp_.FCMP(regs_.F(inst.src1));
			CSET(SCRATCH1, CC_NEQ);
			break;
		case VC_EN:
			regs_.MapFPR(inst.src1);
			fp_.FCMP(regs_.F(inst.src1));
			CSET(SCRATCH1, CC_VS);
			break;
		case VC_NN:
			regs_.MapFPR(inst.src1);
			fp_.FCMP(regs_.F(inst.src1));
			CSET(SCRATCH1, CC_VC);
			break;
		case VC_EI:
			regs_.MapFPR(inst.src1);
			// Compare abs(f) >= Infinity. Could use FACGE for vector.
			MOVI2R(SCRATCH1, 0x7F800000);
			fp_.FMOV(SCRATCHF2, SCRATCH1);
			fp_.FABS(SCRATCHF1, regs_.F(inst.src1));
			fp_.FCMP(SCRATCHF1, SCRATCHF2);
			CSET(SCRATCH1, CC_GE);
			break;
		case VC_NI:
			regs_.MapFPR(inst.src1);
			// Compare abs(f) < Infinity.
			MOVI2R(SCRATCH1, 0x7F800000);
			fp_.FMOV(SCRATCHF2, SCRATCH1);
			fp_.FABS(SCRATCHF1, regs_.F(inst.src1));
			fp_.FCMP(SCRATCHF1, SCRATCHF2);
			// Less than or NAN.
			CSET(SCRATCH1, CC_LT);
			break;
		case VC_ES:
			regs_.MapFPR(inst.src1);
			// Compare abs(f) < Infinity.
			MOVI2R(SCRATCH1, 0x7F800000);
			fp_.FMOV(SCRATCHF2, SCRATCH1);
			fp_.FABS(SCRATCHF1, regs_.F(inst.src1));
			fp_.FCMP(SCRATCHF1, SCRATCHF2);
			// Greater than or equal to Infinity, or NAN.
			CSET(SCRATCH1, CC_HS);
			break;
		case VC_NS:
			regs_.MapFPR(inst.src1);
			// Compare abs(f) < Infinity.
			MOVI2R(SCRATCH1, 0x7F800000);
			fp_.FMOV(SCRATCHF2, SCRATCH1);
			fp_.FABS(SCRATCHF1, regs_.F(inst.src1));
			fp_.FCMP(SCRATCHF1, SCRATCHF2);
			// Less than Infinity, but not NAN.
			CSET(SCRATCH1, CC_LO);
			break;
		case VC_TR:
			MOVI2R(SCRATCH1, 1);
			break;
		case VC_FL:
			MOVI2R(SCRATCH1, 0);
			break;
		}

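		// Store the single result bit into VFPU_CC at bit position (inst.dest >> 4).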
		BFI(regs_.R(IRREG_VFPU_CC), SCRATCH1, inst.dest >> 4, 1);
		break;

	case IROp::FCmpVfpuAggregate:
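		// inst.dest is a mask of CC bits: bit 4 of VFPU_CC becomes "any of them set", bit 5 becomes "all of them set".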
		regs_.MapGPR(IRREG_VFPU_CC, MIPSMap::DIRTY);
		if (inst.dest == 1) {
			// Just replicate the lowest bit to the others.
			BFI(regs_.R(IRREG_VFPU_CC), regs_.R(IRREG_VFPU_CC), 4, 1);
			BFI(regs_.R(IRREG_VFPU_CC), regs_.R(IRREG_VFPU_CC), 5, 1);
		} else {
			MOVI2R(SCRATCH1, inst.dest);
			// Grab the any bit.
			TST(regs_.R(IRREG_VFPU_CC), SCRATCH1);
			CSET(SCRATCH2, CC_NEQ);
			// Now the all bit, by clearing our mask to zero.
			BICS(WZR, SCRATCH1, regs_.R(IRREG_VFPU_CC));
			CSET(SCRATCH1, CC_EQ);

			// Insert the bits into place.
			BFI(regs_.R(IRREG_VFPU_CC), SCRATCH2, 4, 1);
			BFI(regs_.R(IRREG_VFPU_CC), SCRATCH1, 5, 1);
		}
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_FCondAssign(IRInst inst) {
	CONDITIONAL_DISABLE;

	// For Vec4, we could basically just ORR FCMPGE/FCMPLE together, but overlap is trickier.
	regs_.Map(inst);
	fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src2));
	FixupBranch unordered = B(CC_VS);

	switch (inst.op) {
	case IROp::FMin:
		fp_.FMIN(regs_.F(inst.dest), regs_.F(inst.src1), regs_.F(inst.src2));
		break;

	case IROp::FMax:
		fp_.FMAX(regs_.F(inst.dest), regs_.F(inst.src1), regs_.F(inst.src2));
		break;

	default:
		INVALIDOP;
		break;
	}

	FixupBranch orderedDone = B();

	// Not sure if this path is fast, trying to optimize it to be small but correct.
	// Probably an uncommon path.
	SetJumpTarget(unordered);
	fp_.AND(EncodeRegToDouble(SCRATCHF1), regs_.FD(inst.src1), regs_.FD(inst.src2));
	// SCRATCHF1 = 0xFFFFFFFF if sign bit set on both, 0x00000000 otherwise.
	fp_.CMLT(32, EncodeRegToDouble(SCRATCHF1), EncodeRegToDouble(SCRATCHF1));
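	// Treating the floats as signed ints matches float ordering except when both values are
	// negative (the order flips), hence the both-negative fixup via BIT below.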

	switch (inst.op) {
	case IROp::FMin:
		fp_.SMAX(32, EncodeRegToDouble(SCRATCHF2), regs_.FD(inst.src1), regs_.FD(inst.src2));
		fp_.SMIN(32, regs_.FD(inst.dest), regs_.FD(inst.src1), regs_.FD(inst.src2));
		break;

	case IROp::FMax:
		fp_.SMIN(32, EncodeRegToDouble(SCRATCHF2), regs_.FD(inst.src1), regs_.FD(inst.src2));
		fp_.SMAX(32, regs_.FD(inst.dest), regs_.FD(inst.src1), regs_.FD(inst.src2));
		break;

	default:
		INVALIDOP;
		break;
	}
	// Replace dest with SCRATCHF2 if both were less than zero.
	fp_.BIT(regs_.FD(inst.dest), EncodeRegToDouble(SCRATCHF2), EncodeRegToDouble(SCRATCHF1));

	SetJumpTarget(orderedDone);
}

void Arm64JitBackend::CompIR_FCvt(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::FCvtWS:
		// TODO: Unfortunately, we don't currently have the hasSetRounding flag, could skip lookup.
		regs_.Map(inst);
		fp_.FMOV(S0, regs_.F(inst.src1));

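		// currentRoundingFunc_ takes the value in S0 and leaves the converted result in S0,
		// using the rounding mode the guest has currently set.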
		MOVP2R(SCRATCH1_64, &currentRoundingFunc_);
		LDR(INDEX_UNSIGNED, SCRATCH1_64, SCRATCH1_64, 0);
		BLR(SCRATCH1_64);

		fp_.FMOV(regs_.F(inst.dest), S0);
		break;

	case IROp::FCvtSW:
		regs_.Map(inst);
		fp_.SCVTF(regs_.F(inst.dest), regs_.F(inst.src1));
		break;

	case IROp::FCvtScaledWS:
		if (IRRoundMode(inst.src2 >> 6) == IRRoundMode::CAST_1) {
			regs_.Map(inst);
			// NAN would convert to zero, so detect it specifically and replace with 0x7FFFFFFF.
			fp_.MVNI(32, EncodeRegToDouble(SCRATCHF2), 0x80, 24);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src1));
			fp_.FCVTZS(regs_.F(inst.dest), regs_.F(inst.src1), inst.src2 & 0x1F);
			fp_.FCSEL(regs_.F(inst.dest), regs_.F(inst.dest), SCRATCHF2, CC_VC);
		} else {
			RoundingMode rm;
			switch (IRRoundMode(inst.src2 >> 6)) {
			case IRRoundMode::RINT_0: rm = RoundingMode::ROUND_N; break;
			case IRRoundMode::CEIL_2: rm = RoundingMode::ROUND_P; break;
			case IRRoundMode::FLOOR_3: rm = RoundingMode::ROUND_M; break;
			default:
				_assert_msg_(false, "Invalid rounding mode for FCvtScaledWS");
				return;
			}

			// Unfortunately, only Z has a direct scaled instruction.
			// We'll have to multiply.
			regs_.Map(inst);
			fp_.MOVI2F(SCRATCHF1, (float)(1UL << (inst.src2 & 0x1F)), SCRATCH1);
			// This is for the NAN result.
			fp_.MVNI(32, EncodeRegToDouble(SCRATCHF2), 0x80, 24);
			fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src1));
			fp_.FMUL(regs_.F(inst.dest), regs_.F(inst.src1), SCRATCHF1);
			fp_.FCVTS(regs_.F(inst.dest), regs_.F(inst.dest), rm);
			fp_.FCSEL(regs_.F(inst.dest), regs_.F(inst.dest), SCRATCHF2, CC_VC);
		}
		break;

	case IROp::FCvtScaledSW:
		// TODO: This is probably preceded by a GPR transfer, might be ideal to combine.
		regs_.Map(inst);
		fp_.SCVTF(regs_.F(inst.dest), regs_.F(inst.src1), inst.src2 & 0x1F);
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_FRound(IRInst inst) {
	CONDITIONAL_DISABLE;

	regs_.Map(inst);
	// Invert 0x80000000 -> 0x7FFFFFFF for the NAN result.
	fp_.MVNI(32, EncodeRegToDouble(SCRATCHF1), 0x80, 24);
	fp_.FCMP(regs_.F(inst.src1), regs_.F(inst.src1));

	// Luckily, these already saturate.
	switch (inst.op) {
	case IROp::FRound:
		fp_.FCVTS(regs_.F(inst.dest), regs_.F(inst.src1), ROUND_N);
		break;

	case IROp::FTrunc:
		fp_.FCVTS(regs_.F(inst.dest), regs_.F(inst.src1), ROUND_Z);
		break;

	case IROp::FCeil:
		fp_.FCVTS(regs_.F(inst.dest), regs_.F(inst.src1), ROUND_P);
		break;

	case IROp::FFloor:
		fp_.FCVTS(regs_.F(inst.dest), regs_.F(inst.src1), ROUND_M);
		break;

	default:
		INVALIDOP;
		break;
	}

	// Switch to INT_MAX if it was NAN.
	fp_.FCSEL(regs_.F(inst.dest), regs_.F(inst.dest), SCRATCHF1, CC_VC);
}

void Arm64JitBackend::CompIR_FSat(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::FSat0_1:
		regs_.Map(inst);
		fp_.MOVI2F(SCRATCHF1, 1.0f);
		// Note that FMAX takes the larger of the two zeros, which is what we want.
		fp_.MOVI2F(SCRATCHF2, 0.0f);

		fp_.FMIN(regs_.F(inst.dest), regs_.F(inst.src1), SCRATCHF1);
		fp_.FMAX(regs_.F(inst.dest), regs_.F(inst.dest), SCRATCHF2);
		break;

	case IROp::FSatMinus1_1:
		regs_.Map(inst);
		fp_.MOVI2F(SCRATCHF1, 1.0f);
		fp_.FNEG(SCRATCHF2, SCRATCHF1);

		fp_.FMIN(regs_.F(inst.dest), regs_.F(inst.src1), SCRATCHF1);
		fp_.FMAX(regs_.F(inst.dest), regs_.F(inst.dest), SCRATCHF2);
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_FSpecial(IRInst inst) {
	CONDITIONAL_DISABLE;

	auto callFuncF_F = [&](float (*func)(float)) {
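		// Per the AArch64 calling convention, the float argument goes in S0 and the result comes back in S0.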
		regs_.FlushBeforeCall();
		WriteDebugProfilerStatus(IRProfilerStatus::MATH_HELPER);

		// It might be in a non-volatile register.
		// TODO: May have to handle a transfer if SIMD here.
		if (regs_.IsFPRMapped(inst.src1)) {
			int lane = regs_.GetFPRLane(inst.src1);
			if (lane == 0)
				fp_.FMOV(S0, regs_.F(inst.src1));
			else
				fp_.DUP(32, Q0, regs_.F(inst.src1), lane);
		} else {
			int offset = offsetof(MIPSState, f) + inst.src1 * 4;
			fp_.LDR(32, INDEX_UNSIGNED, S0, CTXREG, offset);
		}
		QuickCallFunction(SCRATCH2_64, func);

		regs_.MapFPR(inst.dest, MIPSMap::NOINIT);
		// If it's already S0, we're done - MapReg doesn't actually overwrite the reg in that case.
		if (regs_.F(inst.dest) != S0) {
			fp_.FMOV(regs_.F(inst.dest), S0);
		}

		WriteDebugProfilerStatus(IRProfilerStatus::IN_JIT);
	};

	switch (inst.op) {
	case IROp::FSin:
		callFuncF_F(&vfpu_sin);
		break;

	case IROp::FCos:
		callFuncF_F(&vfpu_cos);
		break;

	case IROp::FRSqrt:
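		// Full 1.0f / sqrt(x) via FSQRT + FDIV, rather than the FRSQRTE estimate.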
		regs_.Map(inst);
		fp_.MOVI2F(SCRATCHF1, 1.0f);
		fp_.FSQRT(regs_.F(inst.dest), regs_.F(inst.src1));
		fp_.FDIV(regs_.F(inst.dest), SCRATCHF1, regs_.F(inst.dest));
		break;

	case IROp::FRecip:
		regs_.Map(inst);
		fp_.MOVI2F(SCRATCHF1, 1.0f);
		fp_.FDIV(regs_.F(inst.dest), SCRATCHF1, regs_.F(inst.src1));
		break;

	case IROp::FAsin:
		callFuncF_F(&vfpu_asin);
		break;

	default:
		INVALIDOP;
		break;
	}
}

void Arm64JitBackend::CompIR_RoundingMode(IRInst inst) {
	CONDITIONAL_DISABLE;

	switch (inst.op) {
	case IROp::RestoreRoundingMode:
		RestoreRoundingMode();
		break;

	case IROp::ApplyRoundingMode:
		ApplyRoundingMode();
		break;

	case IROp::UpdateRoundingMode:
		UpdateRoundingMode();
		break;

	default:
		INVALIDOP;
		break;
	}
}

} // namespace MIPSComp

#endif