// Copyright (c) 2023- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <atomic>
#include <climits>
#include <thread>

#include "Common/Profiler/Profiler.h"
#include "Common/StringUtils.h"
#include "Common/TimeUtil.h"
#include "Core/Core.h"
#include "Core/Debugger/SymbolMap.h"
#include "Core/MemMap.h"
#include "Core/MIPS/MIPSTables.h"
#include "Core/MIPS/IR/IRNativeCommon.h"

using namespace MIPSComp;

namespace MIPSComp {

// Compile-time flag to enable debug stats for ops that were not compiled.
static constexpr bool enableDebugStats = false;
// Compile-time flag for enabling the simple IR jit profiler.
static constexpr bool enableDebugProfiler = false;

// Used only for debugging when enableDebugStats / enableDebugProfiler are true above.
static std::map<uint8_t, int> debugSeenNotCompiledIR;
static std::map<const char *, int> debugSeenNotCompiled;
static std::map<std::pair<uint32_t, IRProfilerStatus>, int> debugSeenPCUsage;
static double lastDebugStatsLog = 0.0;
static constexpr double debugStatsFrequency = 5.0;

static std::thread debugProfilerThread;
std::atomic<bool> debugProfilerThreadStatus = false;

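// Keeps the N highest-count (PC, status) samples seen by the profiler thread.
// Note that Add() overwrites the first slot with a smaller count rather than
// shifting entries down, so this is an approximate top-N, which is fine for a
// debug tool.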
template <int N>
class IRProfilerTopValues {
public:
	void Add(const std::pair<uint32_t, IRProfilerStatus> &v, int c) {
		for (int i = 0; i < N; ++i) {
			if (c > counts[i]) {
				counts[i] = c;
				values[i] = v;
				return;
			}
		}
	}

	int counts[N]{};
	std::pair<uint32_t, IRProfilerStatus> values[N]{};
};

const char *IRProfilerStatusToString(IRProfilerStatus s) {
	switch (s) {
	case IRProfilerStatus::NOT_RUNNING: return "NOT_RUNNING";
	case IRProfilerStatus::IN_JIT: return "IN_JIT";
	case IRProfilerStatus::TIMER_ADVANCE: return "TIMER_ADVANCE";
	case IRProfilerStatus::COMPILING: return "COMPILING";
	case IRProfilerStatus::MATH_HELPER: return "MATH_HELPER";
	case IRProfilerStatus::REPLACEMENT: return "REPLACEMENT";
	case IRProfilerStatus::SYSCALL: return "SYSCALL";
	case IRProfilerStatus::INTERPRET: return "INTERPRET";
	case IRProfilerStatus::IR_INTERPRET: return "IR_INTERPRET";
	}
	return "INVALID";
}

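// Logs the worst offenders collected above, then resets the counters.
// Rate-limited to once every debugStatsFrequency seconds.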
static void LogDebugStats() {
	if (!enableDebugStats && !enableDebugProfiler)
		return;

	double now = time_now_d();
	if (now < lastDebugStatsLog + debugStatsFrequency)
		return;
	lastDebugStatsLog = now;

	int worstIROp = -1;
	int worstIRVal = 0;
	for (auto it : debugSeenNotCompiledIR) {
		if (it.second > worstIRVal) {
			worstIRVal = it.second;
			worstIROp = it.first;
		}
	}
	debugSeenNotCompiledIR.clear();

	const char *worstName = nullptr;
	int worstVal = 0;
	for (auto it : debugSeenNotCompiled) {
		if (it.second > worstVal) {
			worstVal = it.second;
			worstName = it.first;
		}
	}
	debugSeenNotCompiled.clear();

	IRProfilerTopValues<4> slowestPCs;
	int64_t totalCount = 0;
	for (auto it : debugSeenPCUsage) {
		slowestPCs.Add(it.first, it.second);
		totalCount += it.second;
	}
	debugSeenPCUsage.clear();

	if (worstIROp != -1)
		WARN_LOG(Log::JIT, "Most not compiled IR op: %s (%d)", GetIRMeta((IROp)worstIROp)->name, worstIRVal);
	if (worstName != nullptr)
		WARN_LOG(Log::JIT, "Most not compiled op: %s (%d)", worstName, worstVal);
	if (slowestPCs.counts[0] != 0) {
		for (int i = 0; i < 4; ++i) {
			uint32_t pc = slowestPCs.values[i].first;
			const char *status = IRProfilerStatusToString(slowestPCs.values[i].second);
			const std::string label = g_symbolMap ? g_symbolMap->GetDescription(pc) : "";
			WARN_LOG(Log::JIT, "Slowest sampled PC #%d: %08x (%s)/%s (%f%%)", i, pc, label.c_str(), status, 100.0 * (double)slowestPCs.counts[i] / (double)totalCount);
		}
	}
}

bool IRNativeBackend::DebugStatsEnabled() const {
	return enableDebugStats;
}

bool IRNativeBackend::DebugProfilerEnabled() const {
	return enableDebugProfiler;
}

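// Records an op executed via the interpreter fallback, so the debug stats can
// report which ops end up interpreted.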
void IRNativeBackend::NotifyMIPSInterpret(const char *name) {
	_assert_(enableDebugStats);
	debugSeenNotCompiled[name]++;
}

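// Fallback from the native JIT: decode and run a single MIPS instruction
// through the interpreter.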
void IRNativeBackend::DoMIPSInst(uint32_t value) {
	MIPSOpcode op;
	memcpy(&op, &value, sizeof(op));

	if constexpr (enableDebugStats)
		debugSeenNotCompiled[MIPSGetName(op)]++;

	MIPSInterpret(op);
}

// This is called from the IR->JIT implementation to fall back to the IR interpreter for missing ops.
// Not fast.
uint32_t IRNativeBackend::DoIRInst(uint64_t value) {
	IRInst inst[2]{};
	memcpy(&inst[0], &value, sizeof(value));
	if constexpr (enableDebugStats)
		debugSeenNotCompiledIR[(uint8_t)inst[0].op]++;
	// Doesn't really matter what value it returns as PC.
	inst[1].op = IROp::ExitToPC;
	return IRInterpret(currentMIPS, &inst[0]);
}

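// Reports an invalid or misaligned access. Returns 1 if the exception changed
// coreState (so the caller should exit to the dispatcher), 0 to keep running.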
int IRNativeBackend::ReportBadAddress(uint32_t addr, uint32_t alignment, uint32_t isWrite) {
	const auto toss = [&](MemoryExceptionType t) {
		Core_MemoryException(addr, alignment, currentMIPS->pc, t);
		return coreState != CORE_RUNNING_CPU ? 1 : 0;
	};

	if (!Memory::IsValidRange(addr, alignment)) {
		MemoryExceptionType t = isWrite == 1 ? MemoryExceptionType::WRITE_WORD : MemoryExceptionType::READ_WORD;
		if (alignment > 4)
			t = isWrite ? MemoryExceptionType::WRITE_BLOCK : MemoryExceptionType::READ_BLOCK;
		return toss(t);
	} else if (alignment > 1 && (addr & (alignment - 1)) != 0) {
		return toss(MemoryExceptionType::ALIGNMENT);
	}
	return 0;
}

IRNativeBackend::IRNativeBackend(IRBlockCache &blocks) : blocks_(blocks) {}

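// Stop and join the profiler sampling thread if IRNativeJit::Init() started it.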
IRNativeBackend::~IRNativeBackend() {
	if (debugProfilerThreadStatus) {
		debugProfilerThreadStatus = false;
		debugProfilerThread.join();
	}
}

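// Central dispatch: routes each IR op to the backend's CompIR_* handler.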
void IRNativeBackend::CompileIRInst(IRInst inst) {
	switch (inst.op) {
	case IROp::Nop:
		break;

	case IROp::SetConst:
	case IROp::SetConstF:
	case IROp::Downcount:
	case IROp::SetPC:
	case IROp::SetPCConst:
		CompIR_Basic(inst);
		break;

	case IROp::Add:
	case IROp::Sub:
	case IROp::AddConst:
	case IROp::SubConst:
	case IROp::Neg:
		CompIR_Arith(inst);
		break;

	case IROp::And:
	case IROp::Or:
	case IROp::Xor:
	case IROp::AndConst:
	case IROp::OrConst:
	case IROp::XorConst:
	case IROp::Not:
		CompIR_Logic(inst);
		break;

	case IROp::Mov:
	case IROp::Ext8to32:
	case IROp::Ext16to32:
		CompIR_Assign(inst);
		break;

	case IROp::ReverseBits:
	case IROp::BSwap16:
	case IROp::BSwap32:
	case IROp::Clz:
		CompIR_Bits(inst);
		break;

	case IROp::Shl:
	case IROp::Shr:
	case IROp::Sar:
	case IROp::Ror:
	case IROp::ShlImm:
	case IROp::ShrImm:
	case IROp::SarImm:
	case IROp::RorImm:
		CompIR_Shift(inst);
		break;

	case IROp::Slt:
	case IROp::SltConst:
	case IROp::SltU:
	case IROp::SltUConst:
		CompIR_Compare(inst);
		break;

	case IROp::MovZ:
	case IROp::MovNZ:
	case IROp::Max:
	case IROp::Min:
		CompIR_CondAssign(inst);
		break;

	case IROp::MtLo:
	case IROp::MtHi:
	case IROp::MfLo:
	case IROp::MfHi:
		CompIR_HiLo(inst);
		break;

	case IROp::Mult:
	case IROp::MultU:
	case IROp::Madd:
	case IROp::MaddU:
	case IROp::Msub:
	case IROp::MsubU:
		CompIR_Mult(inst);
		break;

	case IROp::Div:
	case IROp::DivU:
		CompIR_Div(inst);
		break;

	case IROp::Load8:
	case IROp::Load8Ext:
	case IROp::Load16:
	case IROp::Load16Ext:
	case IROp::Load32:
	case IROp::Load32Linked:
		CompIR_Load(inst);
		break;

	case IROp::Load32Left:
	case IROp::Load32Right:
		CompIR_LoadShift(inst);
		break;

	case IROp::LoadFloat:
		CompIR_FLoad(inst);
		break;

	case IROp::LoadVec4:
		CompIR_VecLoad(inst);
		break;

	case IROp::Store8:
	case IROp::Store16:
	case IROp::Store32:
		CompIR_Store(inst);
		break;

	case IROp::Store32Conditional:
		CompIR_CondStore(inst);
		break;

	case IROp::Store32Left:
	case IROp::Store32Right:
		CompIR_StoreShift(inst);
		break;

	case IROp::StoreFloat:
		CompIR_FStore(inst);
		break;

	case IROp::StoreVec4:
		CompIR_VecStore(inst);
		break;

	case IROp::FAdd:
	case IROp::FSub:
	case IROp::FMul:
	case IROp::FDiv:
	case IROp::FSqrt:
	case IROp::FNeg:
		CompIR_FArith(inst);
		break;

	case IROp::FMin:
	case IROp::FMax:
		CompIR_FCondAssign(inst);
		break;

	case IROp::FMov:
	case IROp::FAbs:
	case IROp::FSign:
		CompIR_FAssign(inst);
		break;

	case IROp::FRound:
	case IROp::FTrunc:
	case IROp::FCeil:
	case IROp::FFloor:
		CompIR_FRound(inst);
		break;

	case IROp::FCvtWS:
	case IROp::FCvtSW:
	case IROp::FCvtScaledWS:
	case IROp::FCvtScaledSW:
		CompIR_FCvt(inst);
		break;

	case IROp::FSat0_1:
	case IROp::FSatMinus1_1:
		CompIR_FSat(inst);
		break;

	case IROp::FCmp:
	case IROp::FCmovVfpuCC:
	case IROp::FCmpVfpuBit:
	case IROp::FCmpVfpuAggregate:
		CompIR_FCompare(inst);
		break;

	case IROp::RestoreRoundingMode:
	case IROp::ApplyRoundingMode:
	case IROp::UpdateRoundingMode:
		CompIR_RoundingMode(inst);
		break;

	case IROp::SetCtrlVFPU:
	case IROp::SetCtrlVFPUReg:
	case IROp::SetCtrlVFPUFReg:
	case IROp::FpCondFromReg:
	case IROp::FpCondToReg:
	case IROp::FpCtrlFromReg:
	case IROp::FpCtrlToReg:
	case IROp::VfpuCtrlToReg:
	case IROp::FMovFromGPR:
	case IROp::FMovToGPR:
		CompIR_Transfer(inst);
		break;

	case IROp::Vec4Init:
	case IROp::Vec4Shuffle:
	case IROp::Vec4Blend:
	case IROp::Vec4Mov:
		CompIR_VecAssign(inst);
		break;

	case IROp::Vec4Add:
	case IROp::Vec4Sub:
	case IROp::Vec4Mul:
	case IROp::Vec4Div:
	case IROp::Vec4Scale:
	case IROp::Vec4Neg:
	case IROp::Vec4Abs:
		CompIR_VecArith(inst);
		break;

	case IROp::Vec4Dot:
		CompIR_VecHoriz(inst);
		break;

	case IROp::Vec2Unpack16To31:
	case IROp::Vec2Unpack16To32:
	case IROp::Vec4Unpack8To32:
	case IROp::Vec4DuplicateUpperBitsAndShift1:
	case IROp::Vec4Pack31To8:
	case IROp::Vec4Pack32To8:
	case IROp::Vec2Pack31To16:
	case IROp::Vec2Pack32To16:
		CompIR_VecPack(inst);
		break;

	case IROp::Vec4ClampToZero:
	case IROp::Vec2ClampToZero:
		CompIR_VecClamp(inst);
		break;

	case IROp::FSin:
	case IROp::FCos:
	case IROp::FRSqrt:
	case IROp::FRecip:
	case IROp::FAsin:
		CompIR_FSpecial(inst);
		break;

	case IROp::Interpret:
		CompIR_Interpret(inst);
		break;

	case IROp::Syscall:
	case IROp::CallReplacement:
	case IROp::Break:
		CompIR_System(inst);
		break;

	case IROp::Breakpoint:
	case IROp::MemoryCheck:
		CompIR_Breakpoint(inst);
		break;

	case IROp::ValidateAddress8:
	case IROp::ValidateAddress16:
	case IROp::ValidateAddress32:
	case IROp::ValidateAddress128:
		CompIR_ValidateAddress(inst);
		break;

	case IROp::ExitToConst:
	case IROp::ExitToReg:
	case IROp::ExitToPC:
		CompIR_Exit(inst);
		break;

	case IROp::ExitToConstIfEq:
	case IROp::ExitToConstIfNeq:
	case IROp::ExitToConstIfGtZ:
	case IROp::ExitToConstIfGeZ:
	case IROp::ExitToConstIfLtZ:
	case IROp::ExitToConstIfLeZ:
	case IROp::ExitToConstIfFpTrue:
	case IROp::ExitToConstIfFpFalse:
		CompIR_ExitIf(inst);
		break;

	default:
		_assert_msg_(false, "Unexpected IR op %d", (int)inst.op);
		CompIR_Generic(inst);
		break;
	}
}

IRNativeJit::IRNativeJit(MIPSState *mipsState)
	: IRJit(mipsState, true), debugInterface_(blocks_) {}

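// Binds the backend, generates the fixed dispatcher code, and, when the
// profiler is compiled in, starts the sampling thread.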
void IRNativeJit::Init(IRNativeBackend &backend) {
	backend_ = &backend;
	debugInterface_.Init(backend_);
	backend_->GenerateFixedCode(mips_);

	// Wanted this to be a reference, but vtbls get in the way. Shouldn't change.
	hooks_ = backend.GetNativeHooks();

	if (enableDebugProfiler && hooks_.profilerPC) {
		debugProfilerThreadStatus = true;
		debugProfilerThread = std::thread([&] {
			// Spin, spin spin... maybe could at least hook into sleeps.
			while (debugProfilerThreadStatus) {
				IRProfilerStatus stat = *hooks_.profilerStatus;
				uint32_t pc = *hooks_.profilerPC;
				if (stat != IRProfilerStatus::NOT_RUNNING && stat != IRProfilerStatus::SYSCALL) {
					debugSeenPCUsage[std::make_pair(pc, stat)]++;
				}
			}
		});
	}
}

bool IRNativeJit::CompileNativeBlock(IRBlockCache *irblockCache, int block_num) {
	return backend_->CompileBlock(irblockCache, block_num);
}

void IRNativeJit::FinalizeNativeBlock(IRBlockCache *irblockCache, int block_num) {
	backend_->FinalizeBlock(irblockCache, block_num, jo);
}

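// Note: globalticks is unused here; the generated dispatcher manages the run
// loop itself, so this just logs debug stats and enters the native code.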
void IRNativeJit::RunLoopUntil(u64 globalticks) {
	if constexpr (enableDebugStats || enableDebugProfiler) {
		LogDebugStats();
	}

	PROFILE_THIS_SCOPE("jit");
	hooks_.enterDispatcher();
}

void IRNativeJit::ClearCache() {
	IRJit::ClearCache();
	backend_->ClearAllBlocks();
}

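// Maps a native code pointer back to a human-readable name, first checking the
// backend's fixed code, then searching blocks by native offset.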
bool IRNativeJit::DescribeCodePtr(const u8 *ptr, std::string &name) {
	if (ptr != nullptr && backend_->DescribeCodePtr(ptr, name))
		return true;

	int offset = backend_->OffsetFromCodePtr(ptr);
	if (offset == -1)
		return false;

	int block_num = -1;
	int block_offset = INT_MAX;
	for (int i = 0; i < blocks_.GetNumBlocks(); ++i) {
		const auto &b = blocks_.GetBlock(i);
		int b_start = b->GetNativeOffset();
		if (b_start > offset)
			continue;

		int b_end = backend_->GetNativeBlock(i)->checkedOffset;
		int b_offset = offset - b_start;
		if (b_end > b_start && b_end >= offset) {
			// For sure within the block.
			block_num = i;
			block_offset = b_offset;
			break;
		}

		if (b_offset < block_offset) {
			// Possibly within the block, unless in some other block...
			block_num = i;
			block_offset = b_offset;
		}
	}

	// Used by profiling tools that don't like spaces.
	if (block_num == -1) {
		name = "unknownOrDeletedBlock";
		return true;
	}

	const IRBlock *block = blocks_.GetBlock(block_num);
	if (block) {
		u32 start = 0, size = 0;
		block->GetRange(&start, &size);

		// It helps to know which func this block is inside.
		const std::string label = g_symbolMap ? g_symbolMap->GetDescription(start) : "";
		if (!label.empty())
			name = StringFromFormat("block%d_%08x_%s_0x%x", block_num, start, label.c_str(), block_offset);
		else
			name = StringFromFormat("block%d_%08x_0x%x", block_num, start, block_offset);
		return true;
	}
	return false;
}

bool IRNativeJit::CodeInRange(const u8 *ptr) const {
	return backend_->CodeInRange(ptr);
}

const u8 *IRNativeJit::GetCodeBase() const {
	return backend_->CodeBlock().GetBasePtr();
}

bool IRNativeJit::IsAtDispatchFetch(const u8 *ptr) const {
	return ptr == backend_->GetNativeHooks().dispatchFetch;
}

const u8 *IRNativeJit::GetDispatcher() const {
	return backend_->GetNativeHooks().dispatcher;
}

const u8 *IRNativeJit::GetCrashHandler() const {
	return backend_->GetNativeHooks().crashHandler;
}

void IRNativeJit::UpdateFCR31() {
	backend_->UpdateFCR31(mips_);
}

JitBlockCacheDebugInterface *IRNativeJit::GetBlockCacheDebugInterface() {
	return &debugInterface_;
}

bool IRNativeBackend::CodeInRange(const u8 *ptr) const {
	return CodeBlock().IsInSpace(ptr);
}

bool IRNativeBackend::DescribeCodePtr(const u8 *ptr, std::string &name) const {
	if (!CodeBlock().IsInSpace(ptr))
		return false;

	// Used in disassembly viewer.
	if (ptr == (const uint8_t *)hooks_.enterDispatcher) {
		name = "enterDispatcher";
	} else if (ptr == hooks_.dispatcher) {
		name = "dispatcher";
	} else if (ptr == hooks_.dispatchFetch) {
		name = "dispatchFetch";
	} else if (ptr == hooks_.crashHandler) {
		name = "crashHandler";
	} else {
		return false;
	}
	return true;
}

int IRNativeBackend::OffsetFromCodePtr(const u8 *ptr) {
	auto &codeBlock = CodeBlock();
	if (!codeBlock.IsInSpace(ptr))
		return -1;
	return (int)codeBlock.GetOffset(ptr);
}

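// Patches block-link exits in both directions once a block's native code is
// final: incoming links from already-compiled blocks, then this block's own
// outgoing exits to blocks that are already compiled.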
void IRNativeBackend::FinalizeBlock(IRBlockCache *irBlockCache, int block_num, const JitOptions &jo) {
	IRBlock *block = irBlockCache->GetBlock(block_num);
	if (jo.enableBlocklink) {
		uint32_t pc = block->GetOriginalStart();

		// First, link other blocks to this one now that it's finalized.
		auto incoming = linksTo_.equal_range(pc);
		for (auto it = incoming.first; it != incoming.second; ++it) {
			auto &exits = nativeBlocks_[it->second].exits;
			for (auto &blockExit : exits) {
				if (blockExit.dest == pc)
					OverwriteExit(blockExit.offset, blockExit.len, block_num);
			}
		}

		// And also any blocks from this one, in case we're finalizing it later.
		auto &outgoing = nativeBlocks_[block_num].exits;
		for (auto &blockExit : outgoing) {
			int dstBlockNum = blocks_.GetBlockNumberFromStartAddress(blockExit.dest);
			const IRNativeBlock *nativeBlock = GetNativeBlock(dstBlockNum);
			if (nativeBlock)
				OverwriteExit(blockExit.offset, blockExit.len, dstBlockNum);
		}
	}
}

const IRNativeBlock *IRNativeBackend::GetNativeBlock(int block_num) const {
	if (block_num < 0 || block_num >= (int)nativeBlocks_.size())
		return nullptr;
	return &nativeBlocks_[block_num];
}

void IRNativeBackend::SetBlockCheckedOffset(int block_num, int offset) {
	if (block_num >= (int)nativeBlocks_.size())
		nativeBlocks_.resize(block_num + 1);

	nativeBlocks_[block_num].checkedOffset = offset;
}

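// Registers a patchable exit stub targeting pc, so FinalizeBlock() can link it
// once the destination block is compiled.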
void IRNativeBackend::AddLinkableExit(int block_num, uint32_t pc, int exitStartOffset, int exitLen) {
	linksTo_.emplace(pc, block_num);

	if (block_num >= (int)nativeBlocks_.size())
		nativeBlocks_.resize(block_num + 1);
	IRNativeBlockExit blockExit;
	blockExit.offset = exitStartOffset;
	blockExit.len = exitLen;
	blockExit.dest = pc;
	nativeBlocks_[block_num].exits.push_back(blockExit);
}

void IRNativeBackend::EraseAllLinks(int block_num) {
	if (block_num == -1) {
		linksTo_.clear();
		nativeBlocks_.clear();
	} else {
		linksTo_.erase(block_num);
		if (block_num < (int)nativeBlocks_.size())
			nativeBlocks_[block_num].exits.clear();
	}
}

IRNativeBlockCacheDebugInterface::IRNativeBlockCacheDebugInterface(const IRBlockCache &irBlocks)
	: irBlocks_(irBlocks) {}

void IRNativeBlockCacheDebugInterface::Init(const IRNativeBackend *backend) {
	codeBlock_ = &backend->CodeBlock();
	backend_ = backend;
}

bool IRNativeBlockCacheDebugInterface::IsValidBlock(int blockNum) const {
	return irBlocks_.IsValidBlock(blockNum);
}

JitBlockMeta IRNativeBlockCacheDebugInterface::GetBlockMeta(int blockNum) const {
	return irBlocks_.GetBlockMeta(blockNum);
}

int IRNativeBlockCacheDebugInterface::GetNumBlocks() const {
	return irBlocks_.GetNumBlocks();
}

int IRNativeBlockCacheDebugInterface::GetBlockNumberFromStartAddress(u32 em_address) const {
	return irBlocks_.GetBlockNumberFromStartAddress(em_address);
}

JitBlockProfileStats IRNativeBlockCacheDebugInterface::GetBlockProfileStats(int blockNum) const {
	return irBlocks_.GetBlockProfileStats(blockNum);
}

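// Computes the native code range for a block: start is the block's own native
// offset; end comes from the next block's start (or the current code pointer
// for the last block) when the checked-entry offset isn't usable.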
void IRNativeBlockCacheDebugInterface::GetBlockCodeRange(int blockNum, int *startOffset, int *size) const {
	int blockOffset = irBlocks_.GetBlock(blockNum)->GetNativeOffset();
	int endOffset = backend_->GetNativeBlock(blockNum)->checkedOffset;

	// If endOffset is before blockOffset, the checked entry is before the block start.
	if (endOffset < blockOffset) {
		// We assume linear allocation. Maybe a bit dangerous, should always be right.
		if (blockNum + 1 >= GetNumBlocks()) {
			// Last block, get from current code pointer.
			endOffset = (int)codeBlock_->GetOffset(codeBlock_->GetCodePtr());
		} else {
			endOffset = irBlocks_.GetBlock(blockNum + 1)->GetNativeOffset();
			_assert_msg_(endOffset >= blockOffset, "Next block not sequential, block=%d/%08x, next=%d/%08x", blockNum, blockOffset, blockNum + 1, endOffset);
		}
	}

	*startOffset = blockOffset;
	*size = endOffset - blockOffset;
}

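// Fills in the IR-level debug info, then appends a disassembly of the native
// code using the disassembler for the host architecture.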
JitBlockDebugInfo IRNativeBlockCacheDebugInterface::GetBlockDebugInfo(int blockNum) const {
	JitBlockDebugInfo debugInfo = irBlocks_.GetBlockDebugInfo(blockNum);

	int blockOffset, codeSize;
	GetBlockCodeRange(blockNum, &blockOffset, &codeSize);

	const u8 *blockStart = codeBlock_->GetBasePtr() + blockOffset;
#if PPSSPP_ARCH(ARM)
	debugInfo.targetDisasm = DisassembleArm2(blockStart, codeSize);
#elif PPSSPP_ARCH(ARM64)
	debugInfo.targetDisasm = DisassembleArm64(blockStart, codeSize);
#elif PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)
	debugInfo.targetDisasm = DisassembleX86(blockStart, codeSize);
#elif PPSSPP_ARCH(RISCV64)
	debugInfo.targetDisasm = DisassembleRV64(blockStart, codeSize);
#elif PPSSPP_ARCH(LOONGARCH64)
	debugInfo.targetDisasm = DisassembleLA64(blockStart, codeSize);
#endif
	return debugInfo;
}

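// Bloat = native code size / original MIPS code size, per block; summarizes
// the min, max, and average across the cache.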
void IRNativeBlockCacheDebugInterface::ComputeStats(BlockCacheStats &bcStats) const {
	double totalBloat = 0.0;
	double maxBloat = 0.0;
	double minBloat = 1000000000.0;
	int numBlocks = GetNumBlocks();
	for (int i = 0; i < numBlocks; ++i) {
		const IRBlock &b = *irBlocks_.GetBlock(i);

		// Native size, not IR size.
		int blockOffset, codeSize;
		GetBlockCodeRange(i, &blockOffset, &codeSize);
		if (codeSize == 0)
			continue;

		// MIPS (PSP) size.
		u32 origAddr, origSize;
		b.GetRange(&origAddr, &origSize);

		double bloat = (double)codeSize / (double)origSize;
		if (bloat < minBloat) {
			minBloat = bloat;
			bcStats.minBloatBlock = origAddr;
		}
		if (bloat > maxBloat) {
			maxBloat = bloat;
			bcStats.maxBloatBlock = origAddr;
		}
		totalBloat += bloat;
	}
	bcStats.numBlocks = numBlocks;
	bcStats.minBloat = (float)minBloat;
	bcStats.maxBloat = (float)maxBloat;
	bcStats.avgBloat = (float)(totalBloat / (double)numBlocks);
}

} // namespace MIPSComp