// Copyright (c) 2015- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#ifdef _WIN32
//#define SHADERLOG
#endif

#include "Common/LogReporting.h"
#include "Common/Profiler/Profiler.h"
#include "Common/GPU/thin3d.h"
#include "Common/MemoryUtil.h"

#include "Common/StringUtils.h"
#include "Common/GPU/Vulkan/VulkanContext.h"
#include "Common/Log.h"
#include "Common/TimeUtil.h"
#include "GPU/GPUState.h"
#include "GPU/Common/FragmentShaderGenerator.h"
#include "GPU/Common/VertexShaderGenerator.h"
#include "GPU/Common/GeometryShaderGenerator.h"
#include "GPU/Vulkan/ShaderManagerVulkan.h"
#include "GPU/Vulkan/DrawEngineVulkan.h"

// Most drivers treat vkCreateShaderModule as pretty much a memcpy. What actually
// takes time here, and makes this worthy of parallelization, is GLSLtoSPV.
// Takes ownership of tag.
// This always returns something; checking the return value for null is not meaningful.
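// Typical usage (see the shader constructors below): store the returned promise and
// resolve it with BlockUntilReady() only when the module is actually needed, e.g.:
//
//   module_ = CompileShaderModuleAsync(vulkan, stage, source_.c_str(), new std::string(desc));
//   ...
//   VkShaderModule m = module_->BlockUntilReady();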
static Promise<VkShaderModule> *CompileShaderModuleAsync(VulkanContext *vulkan, VkShaderStageFlagBits stage, const char *code, std::string *tag) {
	auto compile = [=] {
		PROFILE_THIS_SCOPE("shadercomp");

		std::string errorMessage;
		std::vector<uint32_t> spirv;

		bool success = GLSLtoSPV(stage, code, GLSLVariant::VULKAN, spirv, &errorMessage);

		if (!errorMessage.empty()) {
			if (success) {
				ERROR_LOG(Log::G3D, "Warnings in shader compilation!");
			} else {
				ERROR_LOG(Log::G3D, "Error in shader compilation!");
			}
			std::string numberedSource = LineNumberString(code);
			ERROR_LOG(Log::G3D, "Messages: %s", errorMessage.c_str());
			ERROR_LOG(Log::G3D, "Shader source:\n%s", numberedSource.c_str());
#if PPSSPP_PLATFORM(WINDOWS)
			OutputDebugStringA("Error messages:\n");
			OutputDebugStringA(errorMessage.c_str());
			OutputDebugStringA(numberedSource.c_str());
#endif
			Reporting::ReportMessage("Vulkan error in shader compilation: info: %s / code: %s", errorMessage.c_str(), code);
		}

		VkShaderModule shaderModule = VK_NULL_HANDLE;
		if (success) {
			const char *createTag = tag ? tag->c_str() : nullptr;
			if (!createTag) {
				switch (stage) {
				case VK_SHADER_STAGE_VERTEX_BIT: createTag = "game_vertex"; break;
				case VK_SHADER_STAGE_FRAGMENT_BIT: createTag = "game_fragment"; break;
				case VK_SHADER_STAGE_GEOMETRY_BIT: createTag = "game_geometry"; break;
				case VK_SHADER_STAGE_COMPUTE_BIT: createTag = "game_compute"; break;
				default: break;
				}
			}

			success = vulkan->CreateShaderModule(spirv, &shaderModule, createTag);
#ifdef SHADERLOG
			OutputDebugStringA("OK");
#endif
		}
		// We own the tag (see above), so free it whether or not compilation succeeded.
		delete tag;
		return shaderModule;
	};

#if defined(_DEBUG)
	// Don't parallelize in debug mode - pathological behavior due to mutex locks in the allocator, which is HEAVILY used by glslang.
	bool singleThreaded = true;
#else
	bool singleThreaded = false;
#endif
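
	// Note: a dedicated thread is requested rather than a pool task - presumably so a
	// slow compile doesn't tie up a worker in the shared thread pool.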
	if (singleThreaded) {
		return Promise<VkShaderModule>::AlreadyDone(compile());
	} else {
		return Promise<VkShaderModule>::Spawn(&g_threadManager, compile, TaskType::DEDICATED_THREAD);
	}
}

VulkanFragmentShader::VulkanFragmentShader(VulkanContext *vulkan, FShaderID id, FragmentShaderFlags flags, const char *code)
	: vulkan_(vulkan), id_(id), flags_(flags) {
	_assert_(!id.is_invalid());
	source_ = code;
	module_ = CompileShaderModuleAsync(vulkan, VK_SHADER_STAGE_FRAGMENT_BIT, source_.c_str(), new std::string(FragmentShaderDesc(id)));
	VERBOSE_LOG(Log::G3D, "Compiled fragment shader:\n%s\n", (const char *)code);
}
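
// The compiled module may still be referenced by in-flight frames, so both the module
// and the Promise wrapping it are released through the deferred-delete queue, after
// waiting for any in-progress compile to finish.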
VulkanFragmentShader::~VulkanFragmentShader() {
	if (module_) {
		VkShaderModule shaderModule = module_->BlockUntilReady();
		if (shaderModule) {
			vulkan_->Delete().QueueDeleteShaderModule(shaderModule);
		}
		vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *m) {
			auto module = (Promise<VkShaderModule> *)m;
			delete module;
		}, module_);
	}
}

std::string VulkanFragmentShader::GetShaderString(DebugShaderStringType type) const {
	switch (type) {
	case SHADER_STRING_SOURCE_CODE:
		return source_;
	case SHADER_STRING_SHORT_DESC:
		return FragmentShaderDesc(id_);
	default:
		return "N/A";
	}
}

VulkanVertexShader::VulkanVertexShader(VulkanContext *vulkan, VShaderID id, VertexShaderFlags flags, const char *code, bool useHWTransform)
	: vulkan_(vulkan), useHWTransform_(useHWTransform), flags_(flags), id_(id) {
	_assert_(!id.is_invalid());
	source_ = code;
	module_ = CompileShaderModuleAsync(vulkan, VK_SHADER_STAGE_VERTEX_BIT, source_.c_str(), new std::string(VertexShaderDesc(id)));
	VERBOSE_LOG(Log::G3D, "Compiled vertex shader:\n%s\n", (const char *)code);
}

VulkanVertexShader::~VulkanVertexShader() {
	if (module_) {
		VkShaderModule shaderModule = module_->BlockUntilReady();
		if (shaderModule) {
			vulkan_->Delete().QueueDeleteShaderModule(shaderModule);
		}
		vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *m) {
			auto module = (Promise<VkShaderModule> *)m;
			delete module;
		}, module_);
	}
}

std::string VulkanVertexShader::GetShaderString(DebugShaderStringType type) const {
	switch (type) {
	case SHADER_STRING_SOURCE_CODE:
		return source_;
	case SHADER_STRING_SHORT_DESC:
		return VertexShaderDesc(id_);
	default:
		return "N/A";
	}
}

VulkanGeometryShader::VulkanGeometryShader(VulkanContext *vulkan, GShaderID id, const char *code)
	: vulkan_(vulkan), id_(id) {
	_assert_(!id.is_invalid());
	source_ = code;
	module_ = CompileShaderModuleAsync(vulkan, VK_SHADER_STAGE_GEOMETRY_BIT, source_.c_str(), new std::string(GeometryShaderDesc(id)));
	VERBOSE_LOG(Log::G3D, "Compiled geometry shader:\n%s\n", (const char *)code);
}

VulkanGeometryShader::~VulkanGeometryShader() {
	if (module_) {
		VkShaderModule shaderModule = module_->BlockUntilReady();
		if (shaderModule) {
			vulkan_->Delete().QueueDeleteShaderModule(shaderModule);
		}
		vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *m) {
			auto module = (Promise<VkShaderModule> *)m;
			delete module;
		}, module_);
	}
}

std::string VulkanGeometryShader::GetShaderString(DebugShaderStringType type) const {
	switch (type) {
	case SHADER_STRING_SOURCE_CODE:
		return source_;
	case SHADER_STRING_SHORT_DESC:
		return GeometryShaderDesc(id_);
	default:
		return "N/A";
	}
}

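// Scratch buffer that the shader source generators write into; each GenerateXxxShader
// call is followed by an assert checking that the output stayed under this size.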
static constexpr size_t CODE_BUFFER_SIZE = 32768;

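// uboAlignment_ mirrors the device's minUniformBufferOffsetAlignment; the draw engine
// needs it to legally pack the uniform blocks into one dynamically-offset buffer.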
ShaderManagerVulkan::ShaderManagerVulkan(Draw::DrawContext *draw)
	: ShaderManagerCommon(draw), compat_(GLSL_VULKAN), fsCache_(16), vsCache_(16), gsCache_(16) {
	codeBuffer_ = new char[CODE_BUFFER_SIZE];
	VulkanContext *vulkan = (VulkanContext *)draw->GetNativeObject(Draw::NativeObject::CONTEXT);
	uboAlignment_ = vulkan->GetPhysicalDeviceProperties().properties.limits.minUniformBufferOffsetAlignment;

	uniforms_ = (Uniforms *)AllocateAlignedMemory(sizeof(Uniforms), 16);
	_assert_(uniforms_);

	static_assert(sizeof(uniforms_->ub_base) <= 512, "ub_base grew too big");
	static_assert(sizeof(uniforms_->ub_lights) <= 512, "ub_lights grew too big");
	static_assert(sizeof(uniforms_->ub_bones) <= 384, "ub_bones grew too big");
}

ShaderManagerVulkan::~ShaderManagerVulkan() {
	FreeAlignedMemory(uniforms_);
	Clear();
	delete[] codeBuffer_;
}

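// DeviceLost() destroys all shader objects; after DeviceRestore() they are recreated
// on demand by GetShaders(), or in bulk when the shader cache is loaded.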
void ShaderManagerVulkan::DeviceLost() {
	Clear();
	draw_ = nullptr;
}

void ShaderManagerVulkan::DeviceRestore(Draw::DrawContext *draw) {
	VulkanContext *vulkan = (VulkanContext *)draw->GetNativeObject(Draw::NativeObject::CONTEXT);
	draw_ = draw;
	uboAlignment_ = vulkan->GetPhysicalDeviceProperties().properties.limits.minUniformBufferOffsetAlignment;
}

void ShaderManagerVulkan::Clear() {
	fsCache_.Iterate([&](const FShaderID &key, VulkanFragmentShader *shader) {
		delete shader;
	});
	vsCache_.Iterate([&](const VShaderID &key, VulkanVertexShader *shader) {
		delete shader;
	});
	gsCache_.Iterate([&](const GShaderID &key, VulkanGeometryShader *shader) {
		delete shader;
	});
	fsCache_.Clear();
	vsCache_.Clear();
	gsCache_.Clear();
	lastFSID_.set_invalid();
	lastVSID_.set_invalid();
	lastGSID_.set_invalid();
	gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_GEOMETRYSHADER_STATE);
}

void ShaderManagerVulkan::ClearShaders() {
	Clear();
	DirtyLastShader();
	gstate_c.Dirty(DIRTY_ALL_UNIFORMS | DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_GEOMETRYSHADER_STATE);
}

void ShaderManagerVulkan::DirtyLastShader() {
	// Forget the last shader ID.
	lastFSID_.set_invalid();
	lastVSID_.set_invalid();
	lastGSID_.set_invalid();
	lastVShader_ = nullptr;
	lastFShader_ = nullptr;
	lastGShader_ = nullptr;
	gstate_c.Dirty(DIRTY_VERTEXSHADER_STATE | DIRTY_FRAGMENTSHADER_STATE | DIRTY_GEOMETRYSHADER_STATE);
}

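// Refreshes the CPU-side uniform blocks for whatever is dirty, and returns the dirty
// mask so the caller can re-upload just those blocks.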
uint64_t ShaderManagerVulkan::UpdateUniforms(bool useBufferedRendering) {
	uint64_t dirty = gstate_c.GetDirtyUniforms();
	if (dirty != 0) {
		if (dirty & DIRTY_BASE_UNIFORMS)
			BaseUpdateUniforms(&uniforms_->ub_base, dirty, false, useBufferedRendering);
		if (dirty & DIRTY_LIGHT_UNIFORMS)
			LightUpdateUniforms(&uniforms_->ub_lights, dirty);
		if (dirty & DIRTY_BONE_UNIFORMS)
			BoneUpdateUniforms(&uniforms_->ub_bones, dirty);
	}
	gstate_c.CleanUniforms();
	return dirty;
}

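// Looks up, or generates and kicks off async compilation of, the vertex, fragment
// and geometry shaders matching the current GPU state. The single-entry "last shader"
// cache in front of the hash maps catches runs of draws with unchanged state.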
void ShaderManagerVulkan::GetShaders(int prim, u32 vertexType, VulkanVertexShader **vshader, VulkanFragmentShader **fshader, VulkanGeometryShader **gshader, const ComputedPipelineState &pipelineState, bool useHWTransform, bool useHWTessellation, bool weightsAsFloat, bool useSkinInDecode) {
	VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);

	VShaderID VSID;
	VulkanVertexShader *vs = nullptr;
	if (gstate_c.IsDirty(DIRTY_VERTEXSHADER_STATE)) {
		gstate_c.Clean(DIRTY_VERTEXSHADER_STATE);
		ComputeVertexShaderID(&VSID, vertexType, useHWTransform, useHWTessellation, weightsAsFloat, useSkinInDecode);
		if (VSID == lastVSID_) {
			_dbg_assert_(lastVShader_ != nullptr);
			vs = lastVShader_;
		} else if (!vsCache_.Get(VSID, &vs)) {
			// Vertex shader not in cache. Let's compile it.
			std::string genErrorString;
			uint64_t uniformMask = 0;  // Not used
			uint32_t attributeMask = 0;  // Not used
			VertexShaderFlags flags{};
			bool success = GenerateVertexShader(VSID, codeBuffer_, compat_, draw_->GetBugs(), &attributeMask, &uniformMask, &flags, &genErrorString);
			_assert_msg_(success, "VS gen error: %s", genErrorString.c_str());
			_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "VS length error: %d", (int)strlen(codeBuffer_));

			// Don't need to re-lookup anymore, now that we lock wider.
			vs = new VulkanVertexShader(vulkan, VSID, flags, codeBuffer_, useHWTransform);
			vsCache_.Insert(VSID, vs);
		}
		lastVShader_ = vs;
		lastVSID_ = VSID;
	} else {
		VSID = lastVSID_;
		vs = lastVShader_;
	}
	*vshader = vs;

	FShaderID FSID;
	VulkanFragmentShader *fs = nullptr;
	if (gstate_c.IsDirty(DIRTY_FRAGMENTSHADER_STATE)) {
		gstate_c.Clean(DIRTY_FRAGMENTSHADER_STATE);
		ComputeFragmentShaderID(&FSID, pipelineState, draw_->GetBugs());
		if (FSID == lastFSID_) {
			_dbg_assert_(lastFShader_ != nullptr);
			fs = lastFShader_;
		} else if (!fsCache_.Get(FSID, &fs)) {
			// Fragment shader not in cache. Let's compile it.
			std::string genErrorString;
			uint64_t uniformMask = 0;  // Not used
			FragmentShaderFlags flags{};
			bool success = GenerateFragmentShader(FSID, codeBuffer_, compat_, draw_->GetBugs(), &uniformMask, &flags, &genErrorString);
			_assert_msg_(success, "FS gen error: %s", genErrorString.c_str());
			_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "FS length error: %d", (int)strlen(codeBuffer_));

			fs = new VulkanFragmentShader(vulkan, FSID, flags, codeBuffer_);
			fsCache_.Insert(FSID, fs);
		}
		lastFShader_ = fs;
		lastFSID_ = FSID;
	} else {
		FSID = lastFSID_;
		fs = lastFShader_;
	}
	*fshader = fs;

	GShaderID GSID;
	VulkanGeometryShader *gs = nullptr;
	if (gstate_c.IsDirty(DIRTY_GEOMETRYSHADER_STATE)) {
		gstate_c.Clean(DIRTY_GEOMETRYSHADER_STATE);
		ComputeGeometryShaderID(&GSID, draw_->GetBugs(), prim);
		if (GSID == lastGSID_) {
			// It's OK for this to be null.
			gs = lastGShader_;
		} else if (GSID.Bit(GS_BIT_ENABLED)) {
			if (!gsCache_.Get(GSID, &gs)) {
				// Geometry shader not in cache. Let's compile it.
				std::string genErrorString;
				bool success = GenerateGeometryShader(GSID, codeBuffer_, compat_, draw_->GetBugs(), &genErrorString);
				_assert_msg_(success, "GS gen error: %s", genErrorString.c_str());
				_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "GS length error: %d", (int)strlen(codeBuffer_));

				gs = new VulkanGeometryShader(vulkan, GSID, codeBuffer_);
				gsCache_.Insert(GSID, gs);
			}
		} else {
			gs = nullptr;
		}
		lastGShader_ = gs;
		lastGSID_ = GSID;
	} else {
		GSID = lastGSID_;
		gs = lastGShader_;
	}
	*gshader = gs;

	_dbg_assert_(FSID.Bit(FS_BIT_FLATSHADE) == VSID.Bit(VS_BIT_FLATSHADE));
	_dbg_assert_(FSID.Bit(FS_BIT_LMODE) == VSID.Bit(VS_BIT_LMODE));
	if (GSID.Bit(GS_BIT_ENABLED)) {
		_dbg_assert_(GSID.Bit(GS_BIT_LMODE) == VSID.Bit(VS_BIT_LMODE));
	}

	_dbg_assert_msg_((*vshader)->UseHWTransform() == useHWTransform, "Bad vshader was computed");
}

std::vector<std::string> ShaderManagerVulkan::DebugGetShaderIDs(DebugShaderType type) {
	std::vector<std::string> ids;
	switch (type) {
	case SHADER_TYPE_VERTEX:
		vsCache_.Iterate([&](const VShaderID &id, VulkanVertexShader *shader) {
			std::string idstr;
			id.ToString(&idstr);
			ids.push_back(idstr);
		});
		break;
	case SHADER_TYPE_FRAGMENT:
		fsCache_.Iterate([&](const FShaderID &id, VulkanFragmentShader *shader) {
			std::string idstr;
			id.ToString(&idstr);
			ids.push_back(idstr);
		});
		break;
	case SHADER_TYPE_GEOMETRY:
		gsCache_.Iterate([&](const GShaderID &id, VulkanGeometryShader *shader) {
			std::string idstr;
			id.ToString(&idstr);
			ids.push_back(idstr);
		});
		break;
	default:
		break;
	}
	return ids;
}

std::string ShaderManagerVulkan::DebugGetShaderString(std::string id, DebugShaderType type, DebugShaderStringType stringType) {
	ShaderID shaderId;
	shaderId.FromString(id);
	switch (type) {
	case SHADER_TYPE_VERTEX:
	{
		VulkanVertexShader *vs;
		if (vsCache_.Get(VShaderID(shaderId), &vs)) {
			return vs ? vs->GetShaderString(stringType) : "null (bad)";
		} else {
			return "";
		}
	}
	case SHADER_TYPE_FRAGMENT:
	{
		VulkanFragmentShader *fs;
		if (fsCache_.Get(FShaderID(shaderId), &fs)) {
			return fs ? fs->GetShaderString(stringType) : "null (bad)";
		} else {
			return "";
		}
	}
	case SHADER_TYPE_GEOMETRY:
	{
		VulkanGeometryShader *gs;
		if (gsCache_.Get(GShaderID(shaderId), &gs)) {
			return gs ? gs->GetShaderString(stringType) : "null (bad)";
		} else {
			return "";
		}
	}
	default:
		return "N/A";
	}
}

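// Reverse lookups from a VkShaderModule to the owning shader object, e.g. for debug
// inspection. Note that these block until each shader's compile has finished.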
VulkanVertexShader *ShaderManagerVulkan::GetVertexShaderFromModule(VkShaderModule module) {
	VulkanVertexShader *vs = nullptr;
	vsCache_.Iterate([&](const VShaderID &id, VulkanVertexShader *shader) {
		Promise<VkShaderModule> *p = shader->GetModule();
		VkShaderModule m = p->BlockUntilReady();
		if (m == module)
			vs = shader;
	});
	return vs;
}

VulkanFragmentShader *ShaderManagerVulkan::GetFragmentShaderFromModule(VkShaderModule module) {
	VulkanFragmentShader *fs = nullptr;
	fsCache_.Iterate([&](const FShaderID &id, VulkanFragmentShader *shader) {
		Promise<VkShaderModule> *p = shader->GetModule();
		VkShaderModule m = p->BlockUntilReady();
		if (m == module)
			fs = shader;
	});
	return fs;
}

VulkanGeometryShader *ShaderManagerVulkan::GetGeometryShaderFromModule(VkShaderModule module) {
	VulkanGeometryShader *gs = nullptr;
	gsCache_.Iterate([&](const GShaderID &id, VulkanGeometryShader *shader) {
		Promise<VkShaderModule> *p = shader->GetModule();
		VkShaderModule m = p->BlockUntilReady();
		if (m == module)
			gs = shader;
	});
	return gs;
}

// Shader cache.
//
// We simply store the IDs of the shaders used during gameplay. On the next startup of
// the same game, we compile all of them up front, so we don't have to compile them
// on the fly later. We also store the Vulkan pipeline cache, so if it contains
// pipelines compiled from SPIR-V matching these shaders, pipeline creation will be
// practically instantaneous.

enum class VulkanCacheDetectFlags {
	EQUAL_DEPTH = 1,
};

#define CACHE_HEADER_MAGIC 0xff51f420
#define CACHE_VERSION 53

struct VulkanCacheHeader {
	uint32_t magic;
	uint32_t version;
	uint32_t useFlags;
	uint32_t detectFlags;
	int numVertexShaders;
	int numFragmentShaders;
	int numGeometryShaders;
};

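// On disk, the header is immediately followed by numVertexShaders VShaderIDs, then
// the fragment shader IDs, then the geometry shader IDs (see LoadCache()/SaveCache()).
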
bool ShaderManagerVulkan::LoadCacheFlags(FILE *f, DrawEngineVulkan *drawEngine) {
	VulkanCacheHeader header{};
	long pos = ftell(f);
	bool success = fread(&header, sizeof(header), 1, f) == 1;
	// We'll read it again later; this is just to check the flags.
	success = success && fseek(f, pos, SEEK_SET) == 0;
	if (!success || header.magic != CACHE_HEADER_MAGIC) {
		WARN_LOG(Log::G3D, "Shader cache magic mismatch");
		return false;
	}
	if (header.version != CACHE_VERSION) {
		WARN_LOG(Log::G3D, "Shader cache version mismatch, %d, expected %d", header.version, CACHE_VERSION);
		return false;
	}

	if ((header.detectFlags & (uint32_t)VulkanCacheDetectFlags::EQUAL_DEPTH) != 0) {
		drawEngine->SetEverUsedExactEqualDepth(true);
	}

	return true;
}

bool ShaderManagerVulkan::LoadCache(FILE *f) {
	VulkanCacheHeader header{};
	bool success = fread(&header, sizeof(header), 1, f) == 1;
	// We don't need to validate magic/version again; that was done in LoadCacheFlags().

	if (header.useFlags != gstate_c.GetUseFlags()) {
		// This can simply be a result of sawExactEqualDepth_ having been flipped to true in the previous run.
		// Let's just keep going.
		WARN_LOG(Log::G3D, "Shader cache useFlags mismatch, %08x, expected %08x", header.useFlags, gstate_c.GetUseFlags());
	} else {
		// We're compiling the shaders now, so they haven't changed anymore.
		gstate_c.useFlagsChanged = false;
	}

	int failCount = 0;

	VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);
	for (int i = 0; i < header.numVertexShaders; i++) {
		VShaderID id;
		if (fread(&id, sizeof(id), 1, f) != 1) {
			ERROR_LOG(Log::G3D, "Vulkan shader cache truncated (in VertexShaders)");
			return false;
		}
		bool useHWTransform = id.Bit(VS_BIT_USE_HW_TRANSFORM);
		std::string genErrorString;
		uint32_t attributeMask = 0;
		uint64_t uniformMask = 0;
		VertexShaderFlags flags;
		if (!GenerateVertexShader(id, codeBuffer_, compat_, draw_->GetBugs(), &attributeMask, &uniformMask, &flags, &genErrorString)) {
			ERROR_LOG(Log::G3D, "Failed to generate vertex shader during cache load");
			// We just ignore this one and carry on.
			failCount++;
			continue;
		}
		_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "VS length error: %d", (int)strlen(codeBuffer_));
		// Don't add the new shader if already compiled - though this should no longer happen.
		if (!vsCache_.ContainsKey(id)) {
			VulkanVertexShader *vs = new VulkanVertexShader(vulkan, id, flags, codeBuffer_, useHWTransform);
			vsCache_.Insert(id, vs);
		}
	}
	uint32_t vendorID = vulkan->GetPhysicalDeviceProperties().properties.vendorID;

	for (int i = 0; i < header.numFragmentShaders; i++) {
		FShaderID id;
		if (fread(&id, sizeof(id), 1, f) != 1) {
			ERROR_LOG(Log::G3D, "Vulkan shader cache truncated (in FragmentShaders)");
			return false;
		}
		std::string genErrorString;
		uint64_t uniformMask = 0;
		FragmentShaderFlags flags;
		if (!GenerateFragmentShader(id, codeBuffer_, compat_, draw_->GetBugs(), &uniformMask, &flags, &genErrorString)) {
			ERROR_LOG(Log::G3D, "Failed to generate fragment shader during cache load");
			// We just ignore this one and carry on.
			failCount++;
			continue;
		}
		_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "FS length error: %d", (int)strlen(codeBuffer_));
		if (!fsCache_.ContainsKey(id)) {
			VulkanFragmentShader *fs = new VulkanFragmentShader(vulkan, id, flags, codeBuffer_);
			fsCache_.Insert(id, fs);
		}
	}

	// If geometry shader culling isn't enabled, don't create geometry shaders cached from earlier runs - creation would likely fail.
	if (gstate_c.Use(GPU_USE_GS_CULLING)) {
		for (int i = 0; i < header.numGeometryShaders; i++) {
			GShaderID id;
			if (fread(&id, sizeof(id), 1, f) != 1) {
				ERROR_LOG(Log::G3D, "Vulkan shader cache truncated (in GeometryShaders)");
				return false;
			}
			std::string genErrorString;
			if (!GenerateGeometryShader(id, codeBuffer_, compat_, draw_->GetBugs(), &genErrorString)) {
				ERROR_LOG(Log::G3D, "Failed to generate geometry shader during cache load");
				// We just ignore this one and carry on.
				failCount++;
				continue;
			}
			_assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "GS length error: %d", (int)strlen(codeBuffer_));
			if (!gsCache_.ContainsKey(id)) {
				VulkanGeometryShader *gs = new VulkanGeometryShader(vulkan, id, codeBuffer_);
				gsCache_.Insert(id, gs);
			}
		}
	}

	NOTICE_LOG(Log::G3D, "ShaderCache: Loaded %d vertex, %d fragment and %d geometry shaders (failed %d)", header.numVertexShaders, header.numFragmentShaders, header.numGeometryShaders, failCount);
	return true;
}

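// Writes the header followed by the raw shader IDs, matching the layout LoadCache()
// reads. The compiled SPIR-V and pipelines are not stored here; recovering those
// quickly is what the separate Vulkan pipeline cache is for.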
void ShaderManagerVulkan::SaveCache(FILE *f, DrawEngineVulkan *drawEngine) {
	VulkanCacheHeader header{};
	header.magic = CACHE_HEADER_MAGIC;
	header.version = CACHE_VERSION;
	header.useFlags = gstate_c.GetUseFlags();
	header.detectFlags = 0;
	if (drawEngine->EverUsedExactEqualDepth())
		header.detectFlags |= (uint32_t)VulkanCacheDetectFlags::EQUAL_DEPTH;
	header.numVertexShaders = (int)vsCache_.size();
	header.numFragmentShaders = (int)fsCache_.size();
	header.numGeometryShaders = (int)gsCache_.size();
	bool writeFailed = fwrite(&header, sizeof(header), 1, f) != 1;
	vsCache_.Iterate([&](const VShaderID &id, VulkanVertexShader *vs) {
		writeFailed = writeFailed || fwrite(&id, sizeof(id), 1, f) != 1;
	});
	fsCache_.Iterate([&](const FShaderID &id, VulkanFragmentShader *fs) {
		writeFailed = writeFailed || fwrite(&id, sizeof(id), 1, f) != 1;
	});
	gsCache_.Iterate([&](const GShaderID &id, VulkanGeometryShader *gs) {
		writeFailed = writeFailed || fwrite(&id, sizeof(id), 1, f) != 1;
	});
	if (writeFailed) {
		ERROR_LOG(Log::G3D, "Failed to write Vulkan shader cache, disk full?");
	} else {
		NOTICE_LOG(Log::G3D, "Saved %d vertex, %d fragment and %d geometry shaders", header.numVertexShaders, header.numFragmentShaders, header.numGeometryShaders);
	}
}