GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/FileLoaders/RamCachingFileLoader.cpp
// Copyright (c) 2015- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include <algorithm>
#include <cstring>
#include <thread>

#include "Common/Thread/ThreadUtil.h"
#include "Common/TimeUtil.h"
#include "Common/Log.h"
#include "Core/FileLoaders/RamCachingFileLoader.h"
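
// RamCachingFileLoader wraps another FileLoader and mirrors its contents
// into a block-based RAM cache, filled on demand by reads and in the
// background by a read-ahead thread.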
// Takes ownership of backend.
RamCachingFileLoader::RamCachingFileLoader(FileLoader *backend)
	: ProxiedFileLoader(backend) {
	filesize_ = backend->FileSize();
	if (filesize_ > 0) {
		InitCache();
	}
}

RamCachingFileLoader::~RamCachingFileLoader() {
	if (filesize_ > 0) {
		ShutdownCache();
	}
}

bool RamCachingFileLoader::Exists() {
	if (exists_ == -1) {
		exists_ = ProxiedFileLoader::Exists() ? 1 : 0;
	}
	return exists_ == 1;
}

bool RamCachingFileLoader::ExistsFast() {
	if (exists_ == -1) {
		return ProxiedFileLoader::ExistsFast();
	}
	return exists_ == 1;
}

bool RamCachingFileLoader::IsDirectory() {
	if (isDirectory_ == -1) {
		isDirectory_ = ProxiedFileLoader::IsDirectory() ? 1 : 0;
	}
	return isDirectory_ == 1;
}

s64 RamCachingFileLoader::FileSize() {
	return filesize_;
}
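
// Reads are served from the RAM cache when possible; missing spans are
// faulted in from the backend first. HINT_UNCACHED reads bypass the cache
// and go straight to the backend.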
size_t RamCachingFileLoader::ReadAt(s64 absolutePos, size_t bytes, void *data, Flags flags) {
	size_t readSize = 0;
	if (cache_ == nullptr || (flags & Flags::HINT_UNCACHED) != 0) {
		readSize = backend_->ReadAt(absolutePos, bytes, data, flags);
	} else {
		readSize = ReadFromCache(absolutePos, bytes, data);
		// Loop in case a single cache fill can't cover the entire read.
		while (readSize < bytes) {
			SaveIntoCache(absolutePos + readSize, bytes - readSize, flags);
			size_t bytesFromCache = ReadFromCache(absolutePos + readSize, bytes - readSize, (u8 *)data + readSize);
			readSize += bytesFromCache;
			if (bytesFromCache == 0) {
				// We can't read any more.
				break;
			}
		}

		StartReadAhead(absolutePos + readSize);
	}
	return readSize;
}
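
// The cache is a single flat allocation split into BLOCK_SIZE-byte blocks
// (BLOCK_SIZE == 1 << BLOCK_SHIFT, defined in the header). blocks_ keeps one
// entry per block: 0 = not cached yet, 1 = cached. aheadRemaining_ counts
// the blocks still missing, so the read-ahead thread knows when to stop.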
void RamCachingFileLoader::InitCache() {
	std::lock_guard<std::mutex> guard(blocksMutex_);
	u32 blockCount = (u32)((filesize_ + BLOCK_SIZE - 1) >> BLOCK_SHIFT);
	// Overallocate for the last block, so a full-block read at the end stays in bounds.
	cache_ = (u8 *)malloc((size_t)blockCount << BLOCK_SHIFT);
	if (cache_ == nullptr) {
		ERROR_LOG(Log::IO, "Failed to allocate cache for \"Cache full ISO in RAM\"! Will fall back to regular reads.");
		return;
	}
	aheadRemaining_ = blockCount;
	blocks_.resize(blockCount);
}

void RamCachingFileLoader::ShutdownCache() {
	Cancel();

	// We can't delete while the thread is running, so have to wait.
	// This should only happen from the menu.
	if (aheadThread_.joinable())
		aheadThread_.join();

	_dbg_assert_(!aheadThreadRunning_);

	std::lock_guard<std::mutex> guard(blocksMutex_);
	blocks_.clear();
	if (cache_ != nullptr) {
		free(cache_);
		cache_ = nullptr;
	}
}

void RamCachingFileLoader::Cancel() {
	if (aheadThreadRunning_) {
		std::lock_guard<std::mutex> guard(blocksMutex_);
		aheadCancel_ = true;
	}

	ProxiedFileLoader::Cancel();
}
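
// Copies whatever is contiguously cached at pos into data, up to bytes.
// Stops at the first uncached block and returns a short count; the caller
// fills the hole with SaveIntoCache() and retries.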
size_t RamCachingFileLoader::ReadFromCache(s64 pos, size_t bytes, void *data) {
	s64 cacheStartPos = pos >> BLOCK_SHIFT;
	s64 cacheEndPos = (pos + bytes - 1) >> BLOCK_SHIFT;
	if ((size_t)cacheEndPos >= blocks_.size()) {
		cacheEndPos = blocks_.size() - 1;
	}

	size_t readSize = 0;
	size_t offset = (size_t)(pos - (cacheStartPos << BLOCK_SHIFT));
	u8 *p = (u8 *)data;

	// Clamp bytes to what's actually available.
	if (pos + (s64)bytes > filesize_) {
		// Should've been caught above, but just in case.
		if (pos >= filesize_) {
			return 0;
		}
		bytes = (size_t)(filesize_ - pos);
	}

	std::lock_guard<std::mutex> guard(blocksMutex_);
	for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
		if (blocks_[(size_t)i] == 0) {
			return readSize;
		}

		size_t toRead = std::min(bytes - readSize, (size_t)BLOCK_SIZE - offset);
		s64 cachePos = (i << BLOCK_SHIFT) + offset;
		memcpy(p + readSize, &cache_[cachePos], toRead);
		readSize += toRead;

		// Don't need an offset after the first read.
		offset = 0;
	}
	return readSize;
}
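
// Pulls up to MAX_BLOCKS_PER_READ uncached blocks into the cache with one
// contiguous backend read starting at the block containing pos. Blocks are
// only marked present for bytes the backend actually returned.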
void RamCachingFileLoader::SaveIntoCache(s64 pos, size_t bytes, Flags flags) {
	s64 cacheStartPos = pos >> BLOCK_SHIFT;
	s64 cacheEndPos = (pos + bytes - 1) >> BLOCK_SHIFT;
	if ((size_t)cacheEndPos >= blocks_.size()) {
		cacheEndPos = blocks_.size() - 1;
	}

	size_t blocksToRead = 0;
	{
		std::lock_guard<std::mutex> guard(blocksMutex_);
		for (s64 i = cacheStartPos; i <= cacheEndPos; ++i) {
			// TODO: Shouldn't we break as soon as we see a 1? The backend read
			// below is contiguous from cacheStartPos either way.
			if (blocks_[(size_t)i] == 0) {
				++blocksToRead;
				if (blocksToRead >= MAX_BLOCKS_PER_READ) {
					break;
				}
			}
		}
	}

	// The cache was overallocated to a whole number of blocks in InitCache(),
	// so a full final block stays in bounds.
	s64 cacheFilePos = cacheStartPos << BLOCK_SHIFT;
	size_t bytesRead = backend_->ReadAt(cacheFilePos, blocksToRead << BLOCK_SHIFT, &cache_[cacheFilePos], flags);

	// In case there was an error, let's not mark blocks that failed to read as read.
	u32 blocksActuallyRead = (u32)((bytesRead + BLOCK_SIZE - 1) >> BLOCK_SHIFT);
	{
		std::lock_guard<std::mutex> guard(blocksMutex_);

		// In case some were simultaneously read by another thread, only count
		// the blocks we newly mark.
		u32 blocksRead = 0;
		for (size_t i = 0; i < blocksActuallyRead; ++i) {
			if (blocks_[(size_t)cacheStartPos + i] == 0) {
				blocks_[(size_t)cacheStartPos + i] = 1;
				++blocksRead;
			}
		}

		if (aheadRemaining_ != 0) {
			aheadRemaining_ -= blocksRead;
		}
	}
}
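
// Starts (or re-targets) the background read-ahead thread. aheadPos_ hints
// where the scan for uncached blocks should begin; the thread exits once
// every block is cached or Cancel() is called.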
void RamCachingFileLoader::StartReadAhead(s64 pos) {
	if (cache_ == nullptr) {
		return;
	}

	std::lock_guard<std::mutex> guard(blocksMutex_);
	aheadPos_ = pos;
	if (aheadThreadRunning_) {
		// Already going.
		return;
	}

	aheadThreadRunning_ = true;
	aheadCancel_ = false;
	// Any previous thread has finished by now (aheadThreadRunning_ was false),
	// but it still needs to be joined before reusing the std::thread object.
	if (aheadThread_.joinable())
		aheadThread_.join();
	aheadThread_ = std::thread([this] {
		SetCurrentThreadName("FileLoaderReadAhead");

		AndroidJNIThreadContext jniContext;

		while (aheadRemaining_ != 0 && !aheadCancel_) {
			// Where should we look?
			const u32 cacheStartPos = NextAheadBlock();
			if (cacheStartPos == 0xFFFFFFFF) {
				// Must be full.
				break;
			}
			u32 cacheEndPos = cacheStartPos + BLOCK_READAHEAD - 1;
			if (cacheEndPos >= blocks_.size()) {
				cacheEndPos = (u32)blocks_.size() - 1;
			}

			for (u32 i = cacheStartPos; i <= cacheEndPos; ++i) {
				if (blocks_[i] == 0) {
					SaveIntoCache((u64)i << BLOCK_SHIFT, BLOCK_SIZE * BLOCK_READAHEAD, Flags::NONE);
					break;
				}
			}
		}

		aheadThreadRunning_ = false;
	});
}
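
// Picks the next uncached block for read-ahead. The scan starts at the block
// containing aheadPos_ (the hint is consumed: later calls scan from block 0)
// and runs to the end; 0xFFFFFFFF means nothing uncached was found there.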
u32 RamCachingFileLoader::NextAheadBlock() {
	std::lock_guard<std::mutex> guard(blocksMutex_);

	// If we had an aheadPos_ set, start reading from there and go forward.
	u32 startFrom = (u32)(aheadPos_ >> BLOCK_SHIFT);
	// But next time, start from the beginning again.
	aheadPos_ = 0;

	for (u32 i = startFrom; i < blocks_.size(); ++i) {
		if (blocks_[i] == 0) {
			return i;
		}
	}

	return 0xFFFFFFFF;
}