GitHub Repository: hrydgard/ppsspp
Path: blob/master/Common/GPU/Vulkan/VulkanRenderManager.cpp
#include <cstdint>

#include <map>
#include <sstream>

#include "Common/Log.h"
#include "Common/StringUtils.h"
#include "Common/TimeUtil.h"

#include "Common/GPU/Vulkan/VulkanAlloc.h"
#include "Common/GPU/Vulkan/VulkanContext.h"
#include "Common/GPU/Vulkan/VulkanRenderManager.h"

#include "Common/LogReporting.h"
#include "Common/Thread/ThreadUtil.h"

#if 0 // def _DEBUG
#define VLOG(...) NOTICE_LOG(Log::G3D, __VA_ARGS__)
#else
#define VLOG(...)
#endif

#ifndef UINT64_MAX
#define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL
#endif

using namespace PPSSPP_VK;

// compatibleRenderPass is a representative of the render pass "compatibility class" that matches the given RenderPassType.
bool VKRGraphicsPipeline::Create(VulkanContext *vulkan, VkRenderPass compatibleRenderPass, RenderPassType rpType, VkSampleCountFlagBits sampleCount, double scheduleTime, int countToCompile) {
	// Good torture test for the shutdown-while-precompiling-shaders issue on PC, where it's normally
	// hard to catch because shaders compile so fast.
	// sleep_ms(200);

	bool multisample = RenderPassTypeHasMultisample(rpType);
	if (multisample) {
		if (sampleCount_ != VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM) {
			_assert_(sampleCount == sampleCount_);
		} else {
			sampleCount_ = sampleCount;
		}
	}

	// Sanity check.
	// Seen in crash reports from PowerVR GE8320, presumably we failed creating some shader modules.
	if (!desc->vertexShader || !desc->fragmentShader) {
		ERROR_LOG(Log::G3D, "Failed creating graphics pipeline - missing vs/fs shader module pointers!");
		pipeline[(size_t)rpType]->Post(VK_NULL_HANDLE);
		return false;
	}

	// Fill in the last part of the desc since now it's time to block.
	VkShaderModule vs = desc->vertexShader->BlockUntilReady();
	VkShaderModule fs = desc->fragmentShader->BlockUntilReady();
	VkShaderModule gs = desc->geometryShader ? desc->geometryShader->BlockUntilReady() : VK_NULL_HANDLE;

	if (!vs || !fs || (!gs && desc->geometryShader)) {
		ERROR_LOG(Log::G3D, "Failed creating graphics pipeline - missing shader modules");
		pipeline[(size_t)rpType]->Post(VK_NULL_HANDLE);
		return false;
	}

	if (!compatibleRenderPass) {
		ERROR_LOG(Log::G3D, "Failed creating graphics pipeline - compatible render pass was nullptr");
		pipeline[(size_t)rpType]->Post(VK_NULL_HANDLE);
		return false;
	}

	uint32_t stageCount = 2;
	VkPipelineShaderStageCreateInfo ss[3]{};
	ss[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
	ss[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
	ss[0].pSpecializationInfo = nullptr;
	ss[0].module = vs;
	ss[0].pName = "main";
	ss[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
	ss[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
	ss[1].pSpecializationInfo = nullptr;
	ss[1].module = fs;
	ss[1].pName = "main";
	if (gs) {
		stageCount++;
		ss[2].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		ss[2].stage = VK_SHADER_STAGE_GEOMETRY_BIT;
		ss[2].pSpecializationInfo = nullptr;
		ss[2].module = gs;
		ss[2].pName = "main";
	}

	VkGraphicsPipelineCreateInfo pipe{ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO };
	pipe.pStages = ss;
	pipe.stageCount = stageCount;
	pipe.renderPass = compatibleRenderPass;
	pipe.basePipelineIndex = 0;
	pipe.pColorBlendState = &desc->cbs;
	pipe.pDepthStencilState = &desc->dss;
	pipe.pRasterizationState = &desc->rs;

	VkPipelineMultisampleStateCreateInfo ms{ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO };
	ms.rasterizationSamples = multisample ? sampleCount : VK_SAMPLE_COUNT_1_BIT;
	if (multisample && (flags_ & PipelineFlags::USES_DISCARD)) {
		// Extreme quality
		ms.sampleShadingEnable = true;
		ms.minSampleShading = 1.0f;
	}

	VkPipelineInputAssemblyStateCreateInfo inputAssembly{ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO };
	inputAssembly.topology = desc->topology;

	// We will use dynamic viewport state.
	pipe.pVertexInputState = &desc->vis;
	pipe.pViewportState = &desc->views;
	pipe.pTessellationState = nullptr;
	pipe.pDynamicState = &desc->ds;
	pipe.pInputAssemblyState = &inputAssembly;
	pipe.pMultisampleState = &ms;
	pipe.layout = desc->pipelineLayout->pipelineLayout;
	pipe.basePipelineHandle = VK_NULL_HANDLE;
	pipe.basePipelineIndex = 0;
	pipe.subpass = 0;

	double start = time_now_d();
	VkPipeline vkpipeline;
	VkResult result = vkCreateGraphicsPipelines(vulkan->GetDevice(), desc->pipelineCache, 1, &pipe, nullptr, &vkpipeline);

	double now = time_now_d();
	double taken_ms_since_scheduling = (now - scheduleTime) * 1000.0;
	double taken_ms = (now - start) * 1000.0;

#ifndef _DEBUG
	if (taken_ms < 0.1) {
		DEBUG_LOG(Log::G3D, "Pipeline (x/%d) time on %s: %0.2f ms, %0.2f ms since scheduling (fast) rpType: %04x sampleBits: %d (%s)",
			countToCompile, GetCurrentThreadName(), taken_ms, taken_ms_since_scheduling, (u32)rpType, (u32)sampleCount, tag_.c_str());
	} else {
		INFO_LOG(Log::G3D, "Pipeline (x/%d) time on %s: %0.2f ms, %0.2f ms since scheduling rpType: %04x sampleBits: %d (%s)",
			countToCompile, GetCurrentThreadName(), taken_ms, taken_ms_since_scheduling, (u32)rpType, (u32)sampleCount, tag_.c_str());
	}
#endif

	bool success = true;
	if (result == VK_INCOMPLETE) {
		// Bad (disallowed by spec) return value seen on Adreno in Burnout :( Try to ignore?
		// Would really like to log more here, we could probably attach more info to desc.
		//
		// At least create a null placeholder to avoid creating over and over if something is broken.
		pipeline[(size_t)rpType]->Post(VK_NULL_HANDLE);
		ERROR_LOG(Log::G3D, "Failed creating graphics pipeline! VK_INCOMPLETE");
		LogCreationFailure();
		success = false;
	} else if (result != VK_SUCCESS) {
		pipeline[(size_t)rpType]->Post(VK_NULL_HANDLE);
		ERROR_LOG(Log::G3D, "Failed creating graphics pipeline! result='%s'", VulkanResultToString(result));
		LogCreationFailure();
		success = false;
	} else {
		// Success!
		if (!tag_.empty()) {
			vulkan->SetDebugName(vkpipeline, VK_OBJECT_TYPE_PIPELINE, tag_.c_str());
		}
		pipeline[(size_t)rpType]->Post(vkpipeline);
	}

	return success;
}

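// For context, a sketch of how the promise-based variants above are consumed
// (illustrative, not code from this file): each entry in pipeline[] is a
// Promise<VkPipeline>, so a caller that needs a variant at draw time would do
// something like
//
//   VkPipeline vkpipeline = pipeline[(size_t)rpType]->BlockUntilReady();
//   if (vkpipeline != VK_NULL_HANDLE) {
//       // bind and draw
//   }
//
// A VK_NULL_HANDLE here means compilation failed earlier; Create() posts the
// null placeholder on every failure path precisely so consumers don't retry
// or block forever.
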
void VKRGraphicsPipeline::DestroyVariants(VulkanContext *vulkan, bool msaaOnly) {
	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
		if (!this->pipeline[i])
			continue;
		if (msaaOnly && (i & (int)RenderPassType::MULTISAMPLE) == 0)
			continue;

		VkPipeline pipeline = this->pipeline[i]->BlockUntilReady();
		// pipeline can be nullptr here, if it failed to compile before.
		if (pipeline) {
			vulkan->Delete().QueueDeletePipeline(pipeline);
		}
		this->pipeline[i] = nullptr;
	}
	sampleCount_ = VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM;
}

void VKRGraphicsPipeline::DestroyVariantsInstant(VkDevice device) {
	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
		if (pipeline[i]) {
			vkDestroyPipeline(device, pipeline[i]->BlockUntilReady(), nullptr);
			delete pipeline[i];
			pipeline[i] = nullptr;
		}
	}
}

VKRGraphicsPipeline::~VKRGraphicsPipeline() {
	// This is called from the callback queued in QueueForDeletion.
	// When we reach here, we should already be empty, so let's assert on that.
	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
		_assert_(!pipeline[i]);
	}
	if (desc)
		desc->Release();
}

void VKRGraphicsPipeline::BlockUntilCompiled() {
	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
		if (pipeline[i]) {
			pipeline[i]->BlockUntilReady();
		}
	}
}

void VKRGraphicsPipeline::QueueForDeletion(VulkanContext *vulkan) {
	// Can't destroy variants here, the pipeline still lives for a while.
	vulkan->Delete().QueueCallback([](VulkanContext *vulkan, void *p) {
		VKRGraphicsPipeline *pipeline = (VKRGraphicsPipeline *)p;
		pipeline->DestroyVariantsInstant(vulkan->GetDevice());
		delete pipeline;
	}, this);
}

u32 VKRGraphicsPipeline::GetVariantsBitmask() const {
	u32 bitmask = 0;
	for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
		if (pipeline[i]) {
			bitmask |= 1 << i;
		}
	}
	return bitmask;
}

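// Illustrative example of the bitmask (assuming the render pass types at
// indices 0 and 1 have compiled variants): GetVariantsBitmask() would return
// 0b11. The shader cache code can store this value and hand it back as
// variantBitmask to CreateGraphicsPipeline() on the next run, so the same
// variants get queued for compilation up front.
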
void VKRGraphicsPipeline::LogCreationFailure() const {
	ERROR_LOG(Log::G3D, "vs: %s\n[END VS]", desc->vertexShaderSource.c_str());
	ERROR_LOG(Log::G3D, "fs: %s\n[END FS]", desc->fragmentShaderSource.c_str());
	if (desc->geometryShader) {
		ERROR_LOG(Log::G3D, "gs: %s\n[END GS]", desc->geometryShaderSource.c_str());
	}
	// TODO: Maybe log various other state?
	ERROR_LOG(Log::G3D, "======== END OF PIPELINE ==========");
}

struct SinglePipelineTask {
	VKRGraphicsPipeline *pipeline;
	VkRenderPass compatibleRenderPass;
	RenderPassType rpType;
	VkSampleCountFlagBits sampleCount;
	double scheduleTime;
	int countToCompile;
};

class CreateMultiPipelinesTask : public Task {
public:
	CreateMultiPipelinesTask(VulkanContext *vulkan, std::vector<SinglePipelineTask> tasks) : vulkan_(vulkan), tasks_(std::move(tasks)) {
		tasksInFlight_.fetch_add(1);
	}
	~CreateMultiPipelinesTask() = default;

	TaskType Type() const override {
		return TaskType::CPU_COMPUTE;
	}

	TaskPriority Priority() const override {
		return TaskPriority::HIGH;
	}

	void Run() override {
		for (auto &task : tasks_) {
			task.pipeline->Create(vulkan_, task.compatibleRenderPass, task.rpType, task.sampleCount, task.scheduleTime, task.countToCompile);
		}
		tasksInFlight_.fetch_sub(1);
	}

	VulkanContext *vulkan_;
	std::vector<SinglePipelineTask> tasks_;

	// Used during shutdown to make sure there aren't any leftover tasks sitting queued.
	// Could probably be done more elegantly. Like waiting for all tasks of a type, or saving pointers to them, or something...
	// Returns the maximum number of tasks in flight seen during the wait.
	static int WaitForAll();
	static std::atomic<int> tasksInFlight_;
};

int CreateMultiPipelinesTask::WaitForAll() {
	int inFlight = 0;
	int maxInFlight = 0;
	while ((inFlight = tasksInFlight_.load()) > 0) {
		if (inFlight > maxInFlight) {
			maxInFlight = inFlight;
		}
		sleep_ms(2, "create-multi-pipelines-wait");
	}
	return maxInFlight;
}

std::atomic<int> CreateMultiPipelinesTask::tasksInFlight_;

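// Note on the lifetime scheme above: tasksInFlight_ is incremented in the
// constructor, on the thread that enqueues the task, not at the start of
// Run(). Incrementing in Run() instead would leave a window where a
// queued-but-not-yet-started task is invisible to WaitForAll(), letting
// shutdown proceed while compile work is still pending.
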
VulkanRenderManager::VulkanRenderManager(VulkanContext *vulkan, bool useThread, HistoryBuffer<FrameTimeData, FRAME_TIME_HISTORY_LENGTH> &frameTimeHistory)
	: vulkan_(vulkan), queueRunner_(vulkan),
	initTimeMs_("initTimeMs"),
	totalGPUTimeMs_("totalGPUTimeMs"),
	renderCPUTimeMs_("renderCPUTimeMs"),
	descUpdateTimeMs_("descUpdateCPUTimeMs"),
	useRenderThread_(useThread),
	frameTimeHistory_(frameTimeHistory)
{
	inflightFramesAtStart_ = vulkan_->GetInflightFrames();

	// For present timing experiments. Disabled for now.
	measurePresentTime_ = false;

	frameDataShared_.Init(vulkan, useThread, measurePresentTime_);

	for (int i = 0; i < inflightFramesAtStart_; i++) {
		frameData_[i].Init(vulkan, i);
	}

	queueRunner_.CreateDeviceObjects();
}

bool VulkanRenderManager::CreateBackbuffers() {
	if (!vulkan_->IsSwapchainInited()) {
		ERROR_LOG(Log::G3D, "No swapchain - can't create backbuffers");
		return false;
	}

	VkCommandBuffer cmdInit = GetInitCmd();

	if (vulkan_->HasRealSwapchain()) {
		if (!CreateSwapchainViewsAndDepth(cmdInit, &postInitBarrier_, frameDataShared_)) {
			return false;
		}
	}

	curWidthRaw_ = -1;
	curHeightRaw_ = -1;

	if (newInflightFrames_ != -1) {
		INFO_LOG(Log::G3D, "Updating inflight frames to %d", newInflightFrames_);
		vulkan_->UpdateInflightFrames(newInflightFrames_);
		newInflightFrames_ = -1;
	}

	outOfDateFrames_ = 0;

	for (int i = 0; i < vulkan_->GetInflightFrames(); i++) {
		auto &frameData = frameData_[i];
		frameData.readyForFence = true; // Just in case.
	}

	// Start the thread(s).
	StartThreads();
	return true;
}

bool VulkanRenderManager::CreateSwapchainViewsAndDepth(VkCommandBuffer cmdInit, VulkanBarrierBatch *barriers, FrameDataShared &frameDataShared) {
	VkResult res = vkGetSwapchainImagesKHR(vulkan_->GetDevice(), vulkan_->GetSwapchain(), &frameDataShared.swapchainImageCount_, nullptr);
	_dbg_assert_(res == VK_SUCCESS);

	VkImage *swapchainImages = new VkImage[frameDataShared.swapchainImageCount_];
	res = vkGetSwapchainImagesKHR(vulkan_->GetDevice(), vulkan_->GetSwapchain(), &frameDataShared.swapchainImageCount_, swapchainImages);
	if (res != VK_SUCCESS) {
		ERROR_LOG(Log::G3D, "vkGetSwapchainImagesKHR failed");
		delete[] swapchainImages;
		return false;
	}

	static const VkSemaphoreCreateInfo semaphoreCreateInfo = { VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
	for (uint32_t i = 0; i < frameDataShared.swapchainImageCount_; i++) {
		SwapchainImageData sc_buffer{};
		sc_buffer.image = swapchainImages[i];
		res = vkCreateSemaphore(vulkan_->GetDevice(), &semaphoreCreateInfo, nullptr, &sc_buffer.renderingCompleteSemaphore);
		_dbg_assert_(res == VK_SUCCESS);

		VkImageViewCreateInfo color_image_view = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
		color_image_view.format = vulkan_->GetSwapchainFormat();
		color_image_view.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
		color_image_view.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
		color_image_view.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
		color_image_view.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
		color_image_view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		color_image_view.subresourceRange.baseMipLevel = 0;
		color_image_view.subresourceRange.levelCount = 1;
		color_image_view.subresourceRange.baseArrayLayer = 0;
		color_image_view.subresourceRange.layerCount = 1; // TODO: Investigate hw-assisted stereo.
		color_image_view.viewType = VK_IMAGE_VIEW_TYPE_2D;
		color_image_view.flags = 0;
		color_image_view.image = sc_buffer.image;

		// We leave the images as UNDEFINED, there's no need to pre-transition them as
		// the backbuffer renderpass starts out with them being auto-transitioned from UNDEFINED anyway.
		// Also, turns out it's illegal to transition un-acquired images, thanks Hans-Kristian. See #11417.

		res = vkCreateImageView(vulkan_->GetDevice(), &color_image_view, nullptr, &sc_buffer.view);
		vulkan_->SetDebugName(sc_buffer.view, VK_OBJECT_TYPE_IMAGE_VIEW, "swapchain_view");
		frameDataShared.swapchainImages_.push_back(sc_buffer);
		_dbg_assert_(res == VK_SUCCESS);
	}
	delete[] swapchainImages;

	// Must be before InitBackbufferRenderPass.
	if (queueRunner_.InitDepthStencilBuffer(cmdInit, barriers)) {
		queueRunner_.InitBackbufferFramebuffers(vulkan_->GetBackbufferWidth(), vulkan_->GetBackbufferHeight(), frameDataShared);
	}
	return true;
}

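// The two vkGetSwapchainImagesKHR calls above are the standard Vulkan
// "count, then fetch" idiom: passing nullptr for the array queries the image
// count, and the second call fills a caller-allocated array of that size.
// Equivalent sketch with a std::vector (illustrative only):
//
//   uint32_t count = 0;
//   vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);
//   std::vector<VkImage> images(count);
//   vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());
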
void VulkanRenderManager::StartThreads() {
	{
		std::unique_lock<std::mutex> lock(compileQueueMutex_);
		_assert_(compileQueue_.empty());
	}

	runCompileThread_ = true; // For controlling the compiler thread's exit

	if (useRenderThread_) {
		INFO_LOG(Log::G3D, "Starting Vulkan submission thread");
		renderThread_ = std::thread(&VulkanRenderManager::RenderThreadFunc, this);
	}
	INFO_LOG(Log::G3D, "Starting Vulkan compiler thread");
	compileThread_ = std::thread(&VulkanRenderManager::CompileThreadFunc, this);

	if (measurePresentTime_ && vulkan_->Extensions().KHR_present_wait && vulkan_->GetPresentMode() == VK_PRESENT_MODE_FIFO_KHR) {
		INFO_LOG(Log::G3D, "Starting Vulkan present wait thread");
		presentWaitThread_ = std::thread(&VulkanRenderManager::PresentWaitThreadFunc, this);
	}
}

// Called from main thread.
void VulkanRenderManager::StopThreads() {
	// Make sure we don't have an open non-backbuffer render pass
	if (curRenderStep_ && curRenderStep_->render.framebuffer != nullptr) {
		EndCurRenderStep();
	}
	// Not sure this is a sensible check - should be ok even if not.
	// _dbg_assert_(steps_.empty());

	if (useRenderThread_) {
		_dbg_assert_(renderThread_.joinable());
		// Tell the render thread to quit when it's done.
		VKRRenderThreadTask *task = new VKRRenderThreadTask(VKRRunType::EXIT);
		task->frame = vulkan_->GetCurFrame();
		{
			std::unique_lock<std::mutex> lock(pushMutex_);
			renderThreadQueue_.push(task);
		}
		pushCondVar_.notify_one();
		// Once the render thread encounters the above exit task, it'll exit.
		renderThread_.join();
		INFO_LOG(Log::G3D, "Vulkan submission thread joined. Frame=%d", vulkan_->GetCurFrame());
	}

	for (int i = 0; i < vulkan_->GetInflightFrames(); i++) {
		auto &frameData = frameData_[i];
		// Zero the queries so we don't try to pull them later.
		frameData.profile.timestampDescriptions.clear();
	}

	{
		std::unique_lock<std::mutex> lock(compileQueueMutex_);
		runCompileThread_ = false; // Compiler and present thread both look at this bool.
		_assert_(compileThread_.joinable());
		compileCond_.notify_one();
	}
	compileThread_.join();

	if (presentWaitThread_.joinable()) {
		presentWaitThread_.join();
	}

	INFO_LOG(Log::G3D, "Vulkan compiler thread joined. Now wait for any straggling compile tasks. runCompileThread_ = %d", (int)runCompileThread_);
	CreateMultiPipelinesTask::WaitForAll();

	{
		std::unique_lock<std::mutex> lock(compileQueueMutex_);
		_assert_(compileQueue_.empty());
	}
}

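// Shutdown ordering in StopThreads(), for reference: (1) queue an EXIT task
// and join the render thread, (2) clear per-frame timestamp descriptions so
// stale queries aren't pulled later, (3) flip runCompileThread_ under the
// queue mutex and join the compiler thread, (4) join the present-wait thread
// (it also watches runCompileThread_), and (5) wait out any
// CreateMultiPipelinesTasks the compiler thread already handed to the thread
// pool, before asserting the compile queue is empty.
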
void VulkanRenderManager::DestroyBackbuffers() {
	StopThreads();
	vulkan_->WaitUntilQueueIdle();

	for (auto &image : frameDataShared_.swapchainImages_) {
		vulkan_->Delete().QueueDeleteImageView(image.view);
		vkDestroySemaphore(vulkan_->GetDevice(), image.renderingCompleteSemaphore, nullptr);
	}
	frameDataShared_.swapchainImages_.clear();
	frameDataShared_.swapchainImageCount_ = 0;

	queueRunner_.DestroyBackBuffers();
}

// Hm, I'm finding the occasional report of these asserts.
void VulkanRenderManager::CheckNothingPending() {
	_assert_(pipelinesToCheck_.empty());
	{
		std::unique_lock<std::mutex> lock(compileQueueMutex_);
		_assert_(compileQueue_.empty());
	}
}

VulkanRenderManager::~VulkanRenderManager() {
	INFO_LOG(Log::G3D, "VulkanRenderManager destructor");

	{
		std::unique_lock<std::mutex> lock(compileQueueMutex_);
		_assert_(compileQueue_.empty());
	}

	if (useRenderThread_) {
		_dbg_assert_(!renderThread_.joinable());
	}

	_dbg_assert_(!runCompileThread_); // StopThreads should already have been called from DestroyBackbuffers.

	vulkan_->WaitUntilQueueIdle();

	_dbg_assert_(pipelineLayouts_.empty());

	VkDevice device = vulkan_->GetDevice();
	frameDataShared_.Destroy(vulkan_);
	for (int i = 0; i < inflightFramesAtStart_; i++) {
		frameData_[i].Destroy(vulkan_);
	}
	queueRunner_.DestroyDeviceObjects();
}

void VulkanRenderManager::CompileThreadFunc() {
	SetCurrentThreadName("ShaderCompile");
	while (true) {
		bool exitAfterCompile = false;
		std::vector<CompileQueueEntry> toCompile;
		{
			std::unique_lock<std::mutex> lock(compileQueueMutex_);
			while (compileQueue_.empty() && runCompileThread_) {
				compileCond_.wait(lock);
			}
			toCompile = std::move(compileQueue_);
			compileQueue_.clear();
			if (!runCompileThread_) {
				exitAfterCompile = true;
			}
		}

		int countToCompile = (int)toCompile.size();

		// Maps each (vertex shader, fragment shader) pair to the pipeline tasks that use it.
		std::map<std::pair<Promise<VkShaderModule> *, Promise<VkShaderModule> *>, std::vector<SinglePipelineTask>> map;

		double scheduleTime = time_now_d();

		// Here we sort pending graphics pipelines by vertex and fragment shaders, and split up further.
		// Those with the same pairs of shaders should be on the same thread, at least on NVIDIA.
		// I don't think PowerVR cares though, it doesn't seem to reuse information between the compiles,
		// so we might want a different splitting algorithm there.
		for (auto &entry : toCompile) {
			switch (entry.type) {
			case CompileQueueEntry::Type::GRAPHICS:
			{
				map[std::make_pair(entry.graphics->desc->vertexShader, entry.graphics->desc->fragmentShader)].push_back(
					SinglePipelineTask{
						entry.graphics,
						entry.compatibleRenderPass,
						entry.renderPassType,
						entry.sampleCount,
						scheduleTime,  // these two are for logging purposes.
						countToCompile,
					}
				);
				break;
			}
			}
		}

		for (const auto &iter : map) {
			auto &shaders = iter.first;
			auto &entries = iter.second;

			// NOTICE_LOG(Log::G3D, "For this shader pair, we have %d pipelines to create", (int)entries.size());

			Task *task = new CreateMultiPipelinesTask(vulkan_, entries);
			g_threadManager.EnqueueTask(task);
		}

		if (exitAfterCompile) {
			break;
		}

		// Hold off just a bit before we check again, to allow bunches of pipelines to collect.
		sleep_ms(1, "pipeline-collect");
	}

	std::unique_lock<std::mutex> lock(compileQueueMutex_);
	_assert_(compileQueue_.empty());
}

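// Illustrative example of the bucketing above (hypothetical numbers): if 12
// pipelines are pending and they use 3 distinct (vertex, fragment) shader
// pairs, the map produces 3 vectors and therefore 3 CreateMultiPipelinesTasks,
// so all variants sharing a shader pair compile on one worker. Per the
// comment above, this helps on drivers like NVIDIA's that reuse compilation
// state between pipelines with identical shaders.
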
void VulkanRenderManager::RenderThreadFunc() {
	SetCurrentThreadName("VulkanRenderMan");
	while (true) {
		_dbg_assert_(useRenderThread_);

		// Pop a task off the queue and execute it.
		VKRRenderThreadTask *task = nullptr;
		{
			std::unique_lock<std::mutex> lock(pushMutex_);
			while (renderThreadQueue_.empty()) {
				pushCondVar_.wait(lock);
			}
			task = renderThreadQueue_.front();
			renderThreadQueue_.pop();
		}

		// Oh, we got a task! We can now have pushMutex_ unlocked, allowing the host to
		// push more work when it feels like it, and just start working.
		if (task->runType == VKRRunType::EXIT) {
			// Oh, host wanted out. Let's leave.
			delete task;
			// In this case, there should be no more tasks.
			break;
		}

		Run(*task);
		delete task;
	}

	// Wait for the device to be done with everything, before tearing stuff down.
	// TODO: Do we really need this? It's probably a good idea, though.
	vkDeviceWaitIdle(vulkan_->GetDevice());
	VLOG("PULL: Quitting");
}

void VulkanRenderManager::PresentWaitThreadFunc() {
	SetCurrentThreadName("PresentWait");

#if !PPSSPP_PLATFORM(IOS_APP_STORE)
	_dbg_assert_(vkWaitForPresentKHR != nullptr);

	uint64_t waitedId = frameIdGen_;
	while (runCompileThread_) {
		const uint64_t timeout = 1000000000ULL;  // 1 sec
		if (VK_SUCCESS == vkWaitForPresentKHR(vulkan_->GetDevice(), vulkan_->GetSwapchain(), waitedId, timeout)) {
			frameTimeHistory_[waitedId].actualPresent = time_now_d();
			frameTimeHistory_[waitedId].waitCount++;
			waitedId++;
		} else {
			// We caught up somehow, which is a bad sign (we should have blocked, right?). Maybe we should break out of the loop?
			sleep_ms(1, "present-wait-problem");
			frameTimeHistory_[waitedId].waitCount++;
		}
		_dbg_assert_(waitedId <= frameIdGen_);
	}
#endif

	INFO_LOG(Log::G3D, "Leaving PresentWaitThreadFunc()");
}

void VulkanRenderManager::PollPresentTiming() {
	// For VK_GOOGLE_display_timing, we need to poll.

	// Poll for information about completed frames.
	// NOTE: We seem to get the information pretty late! Like after 6 frames, which is quite weird.
	// Tested on POCO F4.
	// TODO: Getting validation errors that this should be called from the thread doing the presenting.
	// Probably a fair point. For now, we turn it off.
	if (measurePresentTime_ && vulkan_->Extensions().GOOGLE_display_timing) {
		uint32_t count = 0;
		vkGetPastPresentationTimingGOOGLE(vulkan_->GetDevice(), vulkan_->GetSwapchain(), &count, nullptr);
		if (count > 0) {
			VkPastPresentationTimingGOOGLE *timings = new VkPastPresentationTimingGOOGLE[count];
			vkGetPastPresentationTimingGOOGLE(vulkan_->GetDevice(), vulkan_->GetSwapchain(), &count, timings);
			for (uint32_t i = 0; i < count; i++) {
				uint64_t presentId = timings[i].presentID;
				frameTimeHistory_[presentId].actualPresent = from_time_raw(timings[i].actualPresentTime);
				frameTimeHistory_[presentId].desiredPresentTime = from_time_raw(timings[i].desiredPresentTime);
				frameTimeHistory_[presentId].earliestPresentTime = from_time_raw(timings[i].earliestPresentTime);
				double presentMargin = from_time_raw_relative(timings[i].presentMargin);
				frameTimeHistory_[presentId].presentMargin = presentMargin;
			}
			delete[] timings;
		}
	}
}

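// Note on the present-wait loop above: vkWaitForPresentKHR blocks until the
// present with ID waitedId has actually been displayed, or until the one
// second timeout elapses (any non-VK_SUCCESS result, e.g. VK_TIMEOUT, lands
// in the else branch). waitedId only advances on success, so a frame's
// actualPresent timestamp is recorded exactly once.
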
void VulkanRenderManager::BeginFrame(bool enableProfiling, bool enableLogProfiler) {
	double frameBeginTime = time_now_d();
	VLOG("BeginFrame");
	VkDevice device = vulkan_->GetDevice();

	int curFrame = vulkan_->GetCurFrame();
	FrameData &frameData = frameData_[curFrame];
	VLOG("PUSH: Fencing %d", curFrame);

	// Makes sure the submission from the previous time around has happened. Otherwise
	// we are not allowed to wait from another thread here.
	if (useRenderThread_) {
		std::unique_lock<std::mutex> lock(frameData.fenceMutex);
		while (!frameData.readyForFence) {
			frameData.fenceCondVar.wait(lock);
		}
		frameData.readyForFence = false;
	}

	// This must be the very first Vulkan call we do in a new frame.
	// Makes sure the very last command buffer from the frame before the previous has been fully executed.
	if (vkWaitForFences(device, 1, &frameData.fence, true, UINT64_MAX) == VK_ERROR_DEVICE_LOST) {
		_assert_msg_(false, "Device lost in vkWaitForFences");
	}
	vkResetFences(device, 1, &frameData.fence);

	uint64_t frameId = frameIdGen_++;

	PollPresentTiming();

	ResetDescriptorLists(curFrame);

	int validBits = vulkan_->GetQueueFamilyProperties(vulkan_->GetGraphicsQueueFamilyIndex()).timestampValidBits;

	FrameTimeData &frameTimeData = frameTimeHistory_.Add(frameId);
	frameTimeData.frameId = frameId;
	frameTimeData.frameBegin = frameBeginTime;
	frameTimeData.afterFenceWait = time_now_d();

	// Can't set this until after the fence.
	frameData.profile.enabled = enableProfiling;
	frameData.profile.timestampsEnabled = enableProfiling && validBits > 0;
	frameData.frameId = frameId;

	uint64_t queryResults[MAX_TIMESTAMP_QUERIES];

	if (enableProfiling) {
		// Pull the profiling results from last time and produce a summary!
		if (!frameData.profile.timestampDescriptions.empty() && frameData.profile.timestampsEnabled) {
			int numQueries = (int)frameData.profile.timestampDescriptions.size();
			VkResult res = vkGetQueryPoolResults(
				vulkan_->GetDevice(),
				frameData.profile.queryPool, 0, numQueries, sizeof(uint64_t) * numQueries, &queryResults[0], sizeof(uint64_t),
				VK_QUERY_RESULT_64_BIT);
			if (res == VK_SUCCESS) {
				double timestampConversionFactor = (double)vulkan_->GetPhysicalDeviceProperties().properties.limits.timestampPeriod * (1.0 / 1000000.0);
				uint64_t timestampDiffMask = validBits == 64 ? 0xFFFFFFFFFFFFFFFFULL : ((1ULL << validBits) - 1);
				std::stringstream str;

				char line[256];
				totalGPUTimeMs_.Update(((double)((queryResults[numQueries - 1] - queryResults[0]) & timestampDiffMask) * timestampConversionFactor));
				totalGPUTimeMs_.Format(line, sizeof(line));
				str << line;
				renderCPUTimeMs_.Update((frameData.profile.cpuEndTime - frameData.profile.cpuStartTime) * 1000.0);
				renderCPUTimeMs_.Format(line, sizeof(line));
				str << line;
				descUpdateTimeMs_.Update(frameData.profile.descWriteTime * 1000.0);
				descUpdateTimeMs_.Format(line, sizeof(line));
				str << line;
				snprintf(line, sizeof(line), "Descriptors written: %d (dedup: %d)\n", frameData.profile.descriptorsWritten, frameData.profile.descriptorsDeduped);
				str << line;
				snprintf(line, sizeof(line), "Resource deletions: %d\n", vulkan_->GetLastDeleteCount());
				str << line;
				for (int i = 0; i < numQueries - 1; i++) {
					uint64_t diff = (queryResults[i + 1] - queryResults[i]) & timestampDiffMask;
					double milliseconds = (double)diff * timestampConversionFactor;

					// Can't use SimpleStat for these very easily since these are dynamic per frame.
					// Only the first one is static, the initCmd.
					// Could try some hashtable tracking for the rest, later.
					if (i == 0) {
						initTimeMs_.Update(milliseconds);
						initTimeMs_.Format(line, sizeof(line));
					} else {
						snprintf(line, sizeof(line), "%s: %0.3f ms\n", frameData.profile.timestampDescriptions[i + 1].c_str(), milliseconds);
					}
					str << line;
				}
				frameData.profile.profileSummary = str.str();
			} else {
				frameData.profile.profileSummary = "(error getting GPU profile - not ready?)";
			}
		} else {
			std::stringstream str;
			char line[256];
			renderCPUTimeMs_.Update((frameData.profile.cpuEndTime - frameData.profile.cpuStartTime) * 1000.0);
			renderCPUTimeMs_.Format(line, sizeof(line));
			str << line;
			descUpdateTimeMs_.Update(frameData.profile.descWriteTime * 1000.0);
			descUpdateTimeMs_.Format(line, sizeof(line));
			str << line;
			snprintf(line, sizeof(line), "Descriptors written: %d\n", frameData.profile.descriptorsWritten);
			str << line;
			frameData.profile.profileSummary = str.str();
		}

#ifdef _DEBUG
		std::string cmdString;
		for (int i = 0; i < ARRAY_SIZE(frameData.profile.commandCounts); i++) {
			if (frameData.profile.commandCounts[i] > 0) {
				cmdString += StringFromFormat("%s: %d\n", VKRRenderCommandToString((VKRRenderCommand)i), frameData.profile.commandCounts[i]);
			}
		}
		memset(frameData.profile.commandCounts, 0, sizeof(frameData.profile.commandCounts));
		frameData.profile.profileSummary += cmdString;
#endif
	}

	frameData.profile.descriptorsWritten = 0;
	frameData.profile.descriptorsDeduped = 0;

	// Must be after the fence - this performs deletes.
	VLOG("PUSH: BeginFrame %d", curFrame);

	insideFrame_ = true;
	vulkan_->BeginFrame(enableLogProfiler ? GetInitCmd() : VK_NULL_HANDLE);

	frameData.profile.timestampDescriptions.clear();
	if (frameData.profile.timestampsEnabled) {
		// For various reasons, we need to always use an init cmd buffer in this case to perform the vkCmdResetQueryPool,
		// unless we want to limit ourselves to only measure the main cmd buffer.
		// Later versions of Vulkan have support for clearing queries on the CPU timeline, but we don't want to rely on that.
		// Reserve the first two queries for initCmd.
		frameData.profile.timestampDescriptions.emplace_back("initCmd Begin");
		frameData.profile.timestampDescriptions.emplace_back("initCmd");
		VkCommandBuffer initCmd = GetInitCmd();
	}
}

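// Worked example for the timestampDiffMask arithmetic in BeginFrame
// (illustrative numbers): with timestampValidBits == 48, the mask is
// (1ULL << 48) - 1. If the counter wraps between two queries, say
// prev = 0xFFFFFFFFFFF0 and next = 0x10, then in 64-bit arithmetic
// (next - prev) & mask == 0x20, the correct tick delta despite the wrap.
// Multiplying by timestampPeriod / 1e6 then converts ticks to milliseconds,
// matching the timestampConversionFactor above.
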
VkCommandBuffer VulkanRenderManager::GetInitCmd() {
	int curFrame = vulkan_->GetCurFrame();
	return frameData_[curFrame].GetInitCmd(vulkan_);
}

void VulkanRenderManager::ReportBadStateForDraw() {
	const char *cause1 = "";
	char cause2[256];
	cause2[0] = '\0';
	if (!curRenderStep_) {
		cause1 = "No current render step";
	}
	if (curRenderStep_ && curRenderStep_->stepType != VKRStepType::RENDER) {
		cause1 = "Not a render step: ";
		std::string str = VulkanQueueRunner::StepToString(vulkan_, *curRenderStep_);
		truncate_cpy(cause2, str.c_str());
	}
	ERROR_LOG_REPORT_ONCE(baddraw, Log::G3D, "Can't draw: %s%s. Step count: %d", cause1, cause2, (int)steps_.size());
}

int VulkanRenderManager::WaitForPipelines() {
	return CreateMultiPipelinesTask::WaitForAll();
}

VKRGraphicsPipeline *VulkanRenderManager::CreateGraphicsPipeline(VKRGraphicsPipelineDesc *desc, PipelineFlags pipelineFlags, uint32_t variantBitmask, VkSampleCountFlagBits sampleCount, bool cacheLoad, const char *tag) {
	if (!desc->vertexShader || !desc->fragmentShader) {
		ERROR_LOG(Log::G3D, "Can't create graphics pipeline with missing vs/ps: %p %p", desc->vertexShader, desc->fragmentShader);
		return nullptr;
	}

	VKRGraphicsPipeline *pipeline = new VKRGraphicsPipeline(pipelineFlags, tag);
	pipeline->desc = desc;
	pipeline->desc->AddRef();
	if (curRenderStep_ && !cacheLoad) {
		// The common case during gameplay.
		pipelinesToCheck_.push_back(pipeline);
	} else {
		if (!variantBitmask) {
			WARN_LOG(Log::G3D, "WARNING: Will not compile any variants of pipeline, not in renderpass and empty variantBitmask");
		}
		// Presumably we're in initialization, loading the shader cache.
		// Look at variantBitmask to see what variants we should queue up.
		RPKey key{
			VKRRenderPassLoadAction::CLEAR, VKRRenderPassLoadAction::CLEAR, VKRRenderPassLoadAction::CLEAR,
			VKRRenderPassStoreAction::STORE, VKRRenderPassStoreAction::DONT_CARE, VKRRenderPassStoreAction::DONT_CARE,
		};
		VKRRenderPass *compatibleRenderPass = queueRunner_.GetRenderPass(key);
		std::unique_lock<std::mutex> lock(compileQueueMutex_);
		_dbg_assert_(runCompileThread_);
		bool needsCompile = false;
		for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
			if (!(variantBitmask & (1 << i)))
				continue;
			RenderPassType rpType = (RenderPassType)i;

			// Sanity check - don't compile incompatible types (could be caused by corrupt caches, changes in data structures, etc).
			if ((pipelineFlags & PipelineFlags::USES_DEPTH_STENCIL) && !RenderPassTypeHasDepth(rpType)) {
				WARN_LOG(Log::G3D, "Not compiling pipeline that requires depth, for non depth renderpass type");
				continue;
			}
			// Shouldn't hit this, these should have been filtered elsewhere. However, still a good check to do.
			if (sampleCount == VK_SAMPLE_COUNT_1_BIT && RenderPassTypeHasMultisample(rpType)) {
				WARN_LOG(Log::G3D, "Not compiling single sample pipeline for a multisampled render pass type");
				continue;
			}

			if (rpType == RenderPassType::BACKBUFFER) {
				sampleCount = VK_SAMPLE_COUNT_1_BIT;
			}

			// Sanity check
			if (runCompileThread_) {
				pipeline->pipeline[i] = Promise<VkPipeline>::CreateEmpty();
				compileQueue_.emplace_back(pipeline, compatibleRenderPass->Get(vulkan_, rpType, sampleCount), rpType, sampleCount);
			}
			needsCompile = true;
		}
		if (needsCompile)
			compileCond_.notify_one();
	}
	return pipeline;
}

void VulkanRenderManager::EndCurRenderStep() {
	if (!curRenderStep_)
		return;

	_dbg_assert_(runCompileThread_);

	RPKey key{
		curRenderStep_->render.colorLoad, curRenderStep_->render.depthLoad, curRenderStep_->render.stencilLoad,
		curRenderStep_->render.colorStore, curRenderStep_->render.depthStore, curRenderStep_->render.stencilStore,
	};
	// Save the accumulated pipeline flags so we can use that to configure the render pass.
	// We'll often be able to avoid loading/saving the depth/stencil buffer.
	curRenderStep_->render.pipelineFlags = curPipelineFlags_;
	bool depthStencil = (curPipelineFlags_ & PipelineFlags::USES_DEPTH_STENCIL) != 0;
	RenderPassType rpType = depthStencil ? RenderPassType::HAS_DEPTH : RenderPassType::DEFAULT;

	if (curRenderStep_->render.framebuffer && (rpType & RenderPassType::HAS_DEPTH) && !curRenderStep_->render.framebuffer->HasDepth()) {
		WARN_LOG(Log::G3D, "Trying to render with a depth-writing pipeline to a framebuffer without depth: %s", curRenderStep_->render.framebuffer->Tag());
		rpType = RenderPassType::DEFAULT;
	}

	if (!curRenderStep_->render.framebuffer) {
		rpType = RenderPassType::BACKBUFFER;
	} else {
		// Framebuffers can be stereo, and if so, will control the render pass type to match.
		// Pipelines can be mono and render fine to stereo etc, so not checking them here.
		// Note that we don't support rendering to just one layer of a multilayer framebuffer!
		if (curRenderStep_->render.framebuffer->numLayers > 1) {
			rpType = (RenderPassType)(rpType | RenderPassType::MULTIVIEW);
		}

		if (curRenderStep_->render.framebuffer->sampleCount != VK_SAMPLE_COUNT_1_BIT) {
			rpType = (RenderPassType)(rpType | RenderPassType::MULTISAMPLE);
		}
	}

	VKRRenderPass *renderPass = queueRunner_.GetRenderPass(key);
	curRenderStep_->render.renderPassType = rpType;

	VkSampleCountFlagBits sampleCount = curRenderStep_->render.framebuffer ? curRenderStep_->render.framebuffer->sampleCount : VK_SAMPLE_COUNT_1_BIT;

	bool needsCompile = false;
	for (VKRGraphicsPipeline *pipeline : pipelinesToCheck_) {
		if (!pipeline) {
			// Not good, but let's try not to crash.
			continue;
		}
		std::unique_lock<std::mutex> lock(pipeline->mutex_);
		if (!pipeline->pipeline[(size_t)rpType]) {
			pipeline->pipeline[(size_t)rpType] = Promise<VkPipeline>::CreateEmpty();
			lock.unlock();

			_assert_(renderPass);
			compileQueueMutex_.lock();
			compileQueue_.emplace_back(pipeline, renderPass->Get(vulkan_, rpType, sampleCount), rpType, sampleCount);
			compileQueueMutex_.unlock();
			needsCompile = true;
		}
	}

	compileQueueMutex_.lock();
	if (needsCompile)
		compileCond_.notify_one();
	compileQueueMutex_.unlock();
	pipelinesToCheck_.clear();

	// We don't do this optimization for very small targets, probably not worth it.
	if (!curRenderArea_.Empty() && (curWidth_ > 32 && curHeight_ > 32)) {
		curRenderStep_->render.renderArea = curRenderArea_.ToVkRect2D();
	} else {
		curRenderStep_->render.renderArea.offset = {};
		curRenderStep_->render.renderArea.extent = { (uint32_t)curWidth_, (uint32_t)curHeight_ };
	}
	curRenderArea_.Reset();

	// We no longer have a current render step.
	curRenderStep_ = nullptr;
	curPipelineFlags_ = (PipelineFlags)0;
}

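// Summary of the render pass type selection above: start from DEFAULT, or
// HAS_DEPTH if the accumulated pipeline flags used depth/stencil; then OR in
// MULTIVIEW for layered framebuffers and MULTISAMPLE for MSAA targets; and
// finally force BACKBUFFER when there's no framebuffer at all. The same
// rpType then indexes pipeline->pipeline[] when queueing missing variants.
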
void VulkanRenderManager::BindFramebufferAsRenderTarget(VKRFramebuffer *fb, VKRRenderPassLoadAction color, VKRRenderPassLoadAction depth, VKRRenderPassLoadAction stencil, uint32_t clearColor, float clearDepth, uint8_t clearStencil, const char *tag) {
	_dbg_assert_(insideFrame_);

	// Eliminate dupes (bind of the framebuffer we already are rendering to), instantly convert to a clear if possible.
	if (!steps_.empty() && steps_.back()->stepType == VKRStepType::RENDER && steps_.back()->render.framebuffer == fb) {
		u32 clearMask = 0;
		if (color == VKRRenderPassLoadAction::CLEAR) {
			clearMask |= VK_IMAGE_ASPECT_COLOR_BIT;
		}
		if (depth == VKRRenderPassLoadAction::CLEAR) {
			clearMask |= VK_IMAGE_ASPECT_DEPTH_BIT;
			curPipelineFlags_ |= PipelineFlags::USES_DEPTH_STENCIL;
		}
		if (stencil == VKRRenderPassLoadAction::CLEAR) {
			clearMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
			curPipelineFlags_ |= PipelineFlags::USES_DEPTH_STENCIL;
		}

		// If we need a clear and the previous step has commands already, it's best to just add a clear and keep going.
		// If there's no clear needed, let's also do that.
		//
		// However, if we do need a clear and there are no commands in the previous pass,
		// we want the queuerunner to have the opportunity to merge, so we'll go ahead and make a new renderpass.
		if (clearMask == 0 || !steps_.back()->commands.empty()) {
			curRenderStep_ = steps_.back();
			curStepHasViewport_ = false;
			curStepHasScissor_ = false;
			for (const auto &c : steps_.back()->commands) {
				if (c.cmd == VKRRenderCommand::VIEWPORT) {
					curStepHasViewport_ = true;
				} else if (c.cmd == VKRRenderCommand::SCISSOR) {
					curStepHasScissor_ = true;
				}
			}
			if (clearMask != 0) {
				VkRenderData data{ VKRRenderCommand::CLEAR };
				data.clear.clearColor = clearColor;
				data.clear.clearZ = clearDepth;
				data.clear.clearStencil = clearStencil;
				data.clear.clearMask = clearMask;
				curRenderStep_->commands.push_back(data);
				curRenderArea_.SetRect(0, 0, curWidth_, curHeight_);
			}
			return;
		}
	}

#ifdef _DEBUG
	SanityCheckPassesOnAdd();
#endif

	// More redundant bind elimination.
	if (curRenderStep_) {
		if (curRenderStep_->commands.empty()) {
			if (curRenderStep_->render.colorLoad != VKRRenderPassLoadAction::CLEAR && curRenderStep_->render.depthLoad != VKRRenderPassLoadAction::CLEAR && curRenderStep_->render.stencilLoad != VKRRenderPassLoadAction::CLEAR) {
				// Can trivially kill the last empty render step.
				_dbg_assert_(steps_.back() == curRenderStep_);
				delete steps_.back();
				steps_.pop_back();
				curRenderStep_ = nullptr;
			}
			VLOG("Empty render step. Usually happens after uploading pixels..");
		}

		EndCurRenderStep();
	}

	// Sanity check that we don't have binds to the backbuffer before binds to other buffers. It must always be bound last.
	if (steps_.size() >= 1 && steps_.back()->stepType == VKRStepType::RENDER && steps_.back()->render.framebuffer == nullptr && fb != nullptr) {
		_dbg_assert_(false);
	}

	// Older Mali drivers have issues when the depth and stencil load actions don't match (load/clear/etc).
	// TODO: Determine which versions and do this only where necessary.
	u32 lateClearMask = 0;
	if (depth != stencil && vulkan_->GetPhysicalDeviceProperties().properties.vendorID == VULKAN_VENDOR_ARM) {
		if (stencil == VKRRenderPassLoadAction::DONT_CARE) {
			stencil = depth;
		} else if (depth == VKRRenderPassLoadAction::DONT_CARE) {
			depth = stencil;
		} else if (stencil == VKRRenderPassLoadAction::CLEAR) {
			depth = stencil;
			lateClearMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
		} else if (depth == VKRRenderPassLoadAction::CLEAR) {
			stencil = depth;
			lateClearMask |= VK_IMAGE_ASPECT_DEPTH_BIT;
		}
	}

	VKRStep *step = new VKRStep{ VKRStepType::RENDER };
	step->render.framebuffer = fb;
	step->render.colorLoad = color;
	step->render.depthLoad = depth;
	step->render.stencilLoad = stencil;
	step->render.colorStore = VKRRenderPassStoreAction::STORE;
	step->render.depthStore = VKRRenderPassStoreAction::STORE;
	step->render.stencilStore = VKRRenderPassStoreAction::STORE;
	step->render.clearColor = clearColor;
	step->render.clearDepth = clearDepth;
	step->render.clearStencil = clearStencil;
	step->render.numDraws = 0;
	step->render.numReads = 0;
	step->render.finalColorLayout = !fb ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL : VK_IMAGE_LAYOUT_UNDEFINED;
	step->render.finalDepthStencilLayout = !fb ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL : VK_IMAGE_LAYOUT_UNDEFINED;
	// pipelineFlags, renderArea and renderPassType get filled in when we finalize the step. Do not read from them before that.
	step->tag = tag;
	steps_.push_back(step);

	if (fb) {
		// If there's a KEEP, we naturally read from the framebuffer.
		if (color == VKRRenderPassLoadAction::KEEP || depth == VKRRenderPassLoadAction::KEEP || stencil == VKRRenderPassLoadAction::KEEP) {
			step->dependencies.insert(fb);
		}
	}

	curRenderStep_ = step;
	curStepHasViewport_ = false;
	curStepHasScissor_ = false;
	if (fb) {
		curWidthRaw_ = fb->width;
		curHeightRaw_ = fb->height;
		curWidth_ = fb->width;
		curHeight_ = fb->height;
	} else {
		curWidthRaw_ = vulkan_->GetBackbufferWidth();
		curHeightRaw_ = vulkan_->GetBackbufferHeight();
		if (g_display.rotation == DisplayRotation::ROTATE_90 ||
			g_display.rotation == DisplayRotation::ROTATE_270) {
			curWidth_ = curHeightRaw_;
			curHeight_ = curWidthRaw_;
		} else {
			curWidth_ = curWidthRaw_;
			curHeight_ = curHeightRaw_;
		}
	}

	if (color == VKRRenderPassLoadAction::CLEAR || depth == VKRRenderPassLoadAction::CLEAR || stencil == VKRRenderPassLoadAction::CLEAR) {
		curRenderArea_.SetRect(0, 0, curWidth_, curHeight_);
	}

	// See above - we add a clear afterward if only one side for depth/stencil CLEAR/KEEP.
	if (lateClearMask != 0) {
		VkRenderData data{ VKRRenderCommand::CLEAR };
		data.clear.clearColor = clearColor;
		data.clear.clearZ = clearDepth;
		data.clear.clearStencil = clearStencil;
		data.clear.clearMask = lateClearMask;
		curRenderStep_->commands.push_back(data);
	}

	if (invalidationCallback_) {
		invalidationCallback_(InvalidationCallbackFlags::RENDER_PASS_STATE);
	}
}

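// Example of the Mali workaround above (illustrative): for a bind with
// depth == CLEAR and stencil == KEEP, the two load actions are forced to
// match and VK_IMAGE_ASPECT_DEPTH_BIT is added to lateClearMask, so an
// explicit CLEAR command for the depth aspect is recorded into the step (see
// the lateClearMask block near the end of this function) rather than relying
// on mismatched depth/stencil load ops, which older ARM drivers reportedly
// mishandle.
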
bool VulkanRenderManager::CopyFramebufferToMemory(VKRFramebuffer *src, VkImageAspectFlags aspectBits, int x, int y, int w, int h, Draw::DataFormat destFormat, uint8_t *pixels, int pixelStride, Draw::ReadbackMode mode, const char *tag) {
	_dbg_assert_(insideFrame_);

	for (int i = (int)steps_.size() - 1; i >= 0; i--) {
		if (steps_[i]->stepType == VKRStepType::RENDER && steps_[i]->render.framebuffer == src) {
			steps_[i]->render.numReads++;
			break;
		}
	}

	EndCurRenderStep();

	VKRStep *step = new VKRStep{ VKRStepType::READBACK };
	step->readback.aspectMask = aspectBits;
	step->readback.src = src;
	step->readback.srcRect.offset = { x, y };
	step->readback.srcRect.extent = { (uint32_t)w, (uint32_t)h };
	step->readback.delayed = mode == Draw::ReadbackMode::OLD_DATA_OK;
	step->dependencies.insert(src);
	step->tag = tag;
	steps_.push_back(step);

	if (mode == Draw::ReadbackMode::BLOCK) {
		FlushSync();
	}

	Draw::DataFormat srcFormat = Draw::DataFormat::UNDEFINED;
	if (aspectBits & VK_IMAGE_ASPECT_COLOR_BIT) {
		if (src) {
			switch (src->color.format) {
			case VK_FORMAT_R8G8B8A8_UNORM: srcFormat = Draw::DataFormat::R8G8B8A8_UNORM; break;
			default: _assert_(false);
			}
		} else {
			// Backbuffer.
			if (!(vulkan_->GetSurfaceCapabilities().supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
				ERROR_LOG(Log::G3D, "Copying from backbuffer not supported, can't take screenshots");
				return false;
			}
			switch (vulkan_->GetSwapchainFormat()) {
			case VK_FORMAT_B8G8R8A8_UNORM: srcFormat = Draw::DataFormat::B8G8R8A8_UNORM; break;
			case VK_FORMAT_R8G8B8A8_UNORM: srcFormat = Draw::DataFormat::R8G8B8A8_UNORM; break;
			// NOTE: If you add supported formats here, make sure to also support them in VulkanQueueRunner::CopyReadbackBuffer.
			default:
				ERROR_LOG(Log::G3D, "Unsupported backbuffer format for screenshots");
				return false;
			}
		}
	} else if (aspectBits & VK_IMAGE_ASPECT_STENCIL_BIT) {
		// Copies from stencil are always S8.
		srcFormat = Draw::DataFormat::S8;
	} else if (aspectBits & VK_IMAGE_ASPECT_DEPTH_BIT) {
		switch (src->depth.format) {
		case VK_FORMAT_D24_UNORM_S8_UINT: srcFormat = Draw::DataFormat::D24_S8; break;
		case VK_FORMAT_D32_SFLOAT_S8_UINT: srcFormat = Draw::DataFormat::D32F; break;
		case VK_FORMAT_D16_UNORM_S8_UINT: srcFormat = Draw::DataFormat::D16; break;
		default: _assert_(false);
		}
	} else {
		_assert_(false);
	}

	// Need to call this after FlushSync so the pixels are guaranteed to be ready in CPU-accessible VRAM.
	return queueRunner_.CopyReadbackBuffer(frameData_[vulkan_->GetCurFrame()],
		mode == Draw::ReadbackMode::OLD_DATA_OK ? src : nullptr, w, h, srcFormat, destFormat, pixelStride, pixels);
}

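// Note on ReadbackMode above: BLOCK triggers FlushSync(), so the readback
// step executes before CopyReadbackBuffer() reads this frame's pixels.
// OLD_DATA_OK marks the step as delayed and passes src through to
// CopyReadbackBuffer(), which may then return data from an earlier frame's
// readback of the same framebuffer - staleness the caller has explicitly
// accepted in exchange for not stalling the GPU.
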
void VulkanRenderManager::CopyImageToMemorySync(VkImage image, int mipLevel, int x, int y, int w, int h, Draw::DataFormat destFormat, uint8_t *pixels, int pixelStride, const char *tag) {
	_dbg_assert_(insideFrame_);

	EndCurRenderStep();

	VKRStep *step = new VKRStep{ VKRStepType::READBACK_IMAGE };
	step->readback_image.image = image;
	step->readback_image.srcRect.offset = { x, y };
	step->readback_image.srcRect.extent = { (uint32_t)w, (uint32_t)h };
	step->readback_image.mipLevel = mipLevel;
	step->tag = tag;
	steps_.push_back(step);

	FlushSync();

	// Need to call this after FlushSync so the pixels are guaranteed to be ready in CPU-accessible VRAM.
	queueRunner_.CopyReadbackBuffer(frameData_[vulkan_->GetCurFrame()], nullptr, w, h, destFormat, destFormat, pixelStride, pixels);

	_dbg_assert_(steps_.empty());
}

static void RemoveDrawCommands(FastVec<VkRenderData> *cmds) {
	// Here we remove any DRAW type commands when we hit a CLEAR.
	for (auto &c : *cmds) {
		if (c.cmd == VKRRenderCommand::DRAW || c.cmd == VKRRenderCommand::DRAW_INDEXED) {
			c.cmd = VKRRenderCommand::REMOVED;
		}
	}
}

static void CleanupRenderCommands(FastVec<VkRenderData> *cmds) {
	size_t lastCommand[(int)VKRRenderCommand::NUM_RENDER_COMMANDS];
	memset(lastCommand, -1, sizeof(lastCommand));

	// Find any duplicate state commands (likely from RemoveDrawCommands.)
	for (size_t i = 0; i < cmds->size(); ++i) {
		auto &c = cmds->at(i);
		auto &lastOfCmd = lastCommand[(uint8_t)c.cmd];

		switch (c.cmd) {
		case VKRRenderCommand::REMOVED:
			continue;

		case VKRRenderCommand::VIEWPORT:
		case VKRRenderCommand::SCISSOR:
		case VKRRenderCommand::BLEND:
		case VKRRenderCommand::STENCIL:
			if (lastOfCmd != -1) {
				cmds->at(lastOfCmd).cmd = VKRRenderCommand::REMOVED;
			}
			break;

		case VKRRenderCommand::PUSH_CONSTANTS:
			// TODO: For now, we have to keep this one (it has an offset.) Still update lastCommand.
			break;

		case VKRRenderCommand::CLEAR:
			// Ignore, doesn't participate in state.
			continue;

		case VKRRenderCommand::DRAW_INDEXED:
		case VKRRenderCommand::DRAW:
		default:
			// Boundary - must keep state before this.
			memset(lastCommand, -1, sizeof(lastCommand));
			continue;
		}

		lastOfCmd = i;
	}

	// At this point, anything in lastCommand can be cleaned up too.
	// Note that it's safe to remove the last unused PUSH_CONSTANTS here.
	for (size_t i = 0; i < ARRAY_SIZE(lastCommand); ++i) {
		auto &lastOfCmd = lastCommand[i];
		if (lastOfCmd != -1) {
			cmds->at(lastOfCmd).cmd = VKRRenderCommand::REMOVED;
		}
	}
}

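// Worked example for CleanupRenderCommands (illustrative): given the command
// stream [VIEWPORT, VIEWPORT, DRAW, SCISSOR], the second VIEWPORT marks the
// first one REMOVED (it was never consumed by a draw), the DRAW resets the
// tracking table, and the trailing SCISSOR is removed by the final sweep
// since no draw follows it. The result is [REMOVED, VIEWPORT, DRAW, REMOVED],
// leaving the REMOVED entries to be skipped at execution time.
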
void VulkanRenderManager::Clear(uint32_t clearColor, float clearZ, int clearStencil, int clearMask) {
	_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
	if (!clearMask)
		return;

	// If this is the first drawing command or clears everything, merge it into the pass.
	int allAspects = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
	if (curRenderStep_->render.numDraws == 0 || clearMask == allAspects) {
		curRenderStep_->render.clearColor = clearColor;
		curRenderStep_->render.clearDepth = clearZ;
		curRenderStep_->render.clearStencil = clearStencil;
		curRenderStep_->render.colorLoad = (clearMask & VK_IMAGE_ASPECT_COLOR_BIT) ? VKRRenderPassLoadAction::CLEAR : VKRRenderPassLoadAction::KEEP;
		curRenderStep_->render.depthLoad = (clearMask & VK_IMAGE_ASPECT_DEPTH_BIT) ? VKRRenderPassLoadAction::CLEAR : VKRRenderPassLoadAction::KEEP;
		curRenderStep_->render.stencilLoad = (clearMask & VK_IMAGE_ASPECT_STENCIL_BIT) ? VKRRenderPassLoadAction::CLEAR : VKRRenderPassLoadAction::KEEP;

		if (clearMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
			if (curRenderStep_->render.framebuffer && !curRenderStep_->render.framebuffer->HasDepth()) {
				WARN_LOG(Log::G3D, "Trying to clear depth/stencil on a non-depth framebuffer: %s", curRenderStep_->render.framebuffer->Tag());
			} else {
				curPipelineFlags_ |= PipelineFlags::USES_DEPTH_STENCIL;
			}
		}

		// In case there were commands already.
		curRenderStep_->render.numDraws = 0;
		RemoveDrawCommands(&curRenderStep_->commands);
	} else {
		VkRenderData data{ VKRRenderCommand::CLEAR };
		data.clear.clearColor = clearColor;
		data.clear.clearZ = clearZ;
		data.clear.clearStencil = clearStencil;
		data.clear.clearMask = clearMask;
		curRenderStep_->commands.push_back(data);
	}

	curRenderArea_.SetRect(0, 0, curWidth_, curHeight_);
}

void VulkanRenderManager::CopyFramebuffer(VKRFramebuffer *src, VkRect2D srcRect, VKRFramebuffer *dst, VkOffset2D dstPos, VkImageAspectFlags aspectMask, const char *tag) {
#ifdef _DEBUG
	SanityCheckPassesOnAdd();
#endif

	_dbg_assert_msg_(srcRect.offset.x >= 0, "srcrect offset x (%d) < 0", srcRect.offset.x);
	_dbg_assert_msg_(srcRect.offset.y >= 0, "srcrect offset y (%d) < 0", srcRect.offset.y);
	_dbg_assert_msg_(srcRect.offset.x + srcRect.extent.width <= (uint32_t)src->width, "srcrect offset x (%d) + extent (%d) > width (%d)", srcRect.offset.x, srcRect.extent.width, (uint32_t)src->width);
	_dbg_assert_msg_(srcRect.offset.y + srcRect.extent.height <= (uint32_t)src->height, "srcrect offset y (%d) + extent (%d) > height (%d)", srcRect.offset.y, srcRect.extent.height, (uint32_t)src->height);

	_dbg_assert_msg_(srcRect.extent.width > 0, "copy srcwidth == 0");
	_dbg_assert_msg_(srcRect.extent.height > 0, "copy srcheight == 0");

	_dbg_assert_msg_(dstPos.x >= 0, "dstPos offset x (%d) < 0", dstPos.x);
	_dbg_assert_msg_(dstPos.y >= 0, "dstPos offset y (%d) < 0", dstPos.y);
	_dbg_assert_msg_(dstPos.x + srcRect.extent.width <= (uint32_t)dst->width, "dstPos + extent x > width");
	_dbg_assert_msg_(dstPos.y + srcRect.extent.height <= (uint32_t)dst->height, "dstPos + extent y > height");

	for (int i = (int)steps_.size() - 1; i >= 0; i--) {
		if (steps_[i]->stepType == VKRStepType::RENDER && steps_[i]->render.framebuffer == src) {
			if (aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
				if (steps_[i]->render.finalColorLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
					steps_[i]->render.finalColorLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
				}
			}
			if (aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
				if (steps_[i]->render.finalDepthStencilLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
					steps_[i]->render.finalDepthStencilLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
				}
			}
			steps_[i]->render.numReads++;
			break;
		}
	}
	for (int i = (int)steps_.size() - 1; i >= 0; i--) {
		if (steps_[i]->stepType == VKRStepType::RENDER && steps_[i]->render.framebuffer == dst) {
			if (aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
				if (steps_[i]->render.finalColorLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
					steps_[i]->render.finalColorLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
				}
			}
			if (aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
				if (steps_[i]->render.finalDepthStencilLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
					steps_[i]->render.finalDepthStencilLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
				}
			}
			break;
		}
	}

	EndCurRenderStep();

	VKRStep *step = new VKRStep{ VKRStepType::COPY };

	step->copy.aspectMask = aspectMask;
	step->copy.src = src;
	step->copy.srcRect = srcRect;
	step->copy.dst = dst;
	step->copy.dstPos = dstPos;
	step->dependencies.insert(src);
	step->tag = tag;
	bool fillsDst = dst && srcRect.offset.x == 0 && srcRect.offset.y == 0 && srcRect.extent.width == dst->width && srcRect.extent.height == dst->height;
	if (dstPos.x != 0 || dstPos.y != 0 || !fillsDst)
		step->dependencies.insert(dst);

	steps_.push_back(step);
}

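// The fillsDst test above decides whether the copy depends on dst's previous
// contents: a copy that lands at (0,0) and covers dst's full extent
// overwrites everything, so the step doesn't need dst's prior render as a
// dependency; any smaller or offset copy must wait for the step that last
// rendered dst.
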
void VulkanRenderManager::BlitFramebuffer(VKRFramebuffer *src, VkRect2D srcRect, VKRFramebuffer *dst, VkRect2D dstRect, VkImageAspectFlags aspectMask, VkFilter filter, const char *tag) {
#ifdef _DEBUG
	SanityCheckPassesOnAdd();
#endif

	_dbg_assert_msg_(srcRect.offset.x >= 0, "srcrect offset x (%d) < 0", srcRect.offset.x);
	_dbg_assert_msg_(srcRect.offset.y >= 0, "srcrect offset y (%d) < 0", srcRect.offset.y);
	_dbg_assert_msg_(srcRect.offset.x + srcRect.extent.width <= (uint32_t)src->width, "srcrect offset x (%d) + extent (%d) > width (%d)", srcRect.offset.x, srcRect.extent.width, (uint32_t)src->width);
	_dbg_assert_msg_(srcRect.offset.y + srcRect.extent.height <= (uint32_t)src->height, "srcrect offset y (%d) + extent (%d) > height (%d)", srcRect.offset.y, srcRect.extent.height, (uint32_t)src->height);

	_dbg_assert_msg_(srcRect.extent.width > 0, "blit srcwidth == 0");
	_dbg_assert_msg_(srcRect.extent.height > 0, "blit srcheight == 0");

	_dbg_assert_msg_(dstRect.offset.x >= 0, "dstrect offset x < 0");
	_dbg_assert_msg_(dstRect.offset.y >= 0, "dstrect offset y < 0");
	_dbg_assert_msg_(dstRect.offset.x + dstRect.extent.width <= (uint32_t)dst->width, "dstrect offset x + extent > width");
	_dbg_assert_msg_(dstRect.offset.y + dstRect.extent.height <= (uint32_t)dst->height, "dstrect offset y + extent > height");

	_dbg_assert_msg_(dstRect.extent.width > 0, "blit dstwidth == 0");
	_dbg_assert_msg_(dstRect.extent.height > 0, "blit dstheight == 0");
	// TODO: We seem to be missing the final layout handling here that Copy does above...

	for (int i = (int)steps_.size() - 1; i >= 0; i--) {
		if (steps_[i]->stepType == VKRStepType::RENDER && steps_[i]->render.framebuffer == src) {
			steps_[i]->render.numReads++;
			break;
		}
	}

	// Sanity check. Added an assert to try to gather more info.
	// Got this assert in NPJH50443 FINAL FANTASY TYPE-0, but pretty rare. Moving back to debug assert.
	if (aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
		_dbg_assert_msg_(src->depth.image != VK_NULL_HANDLE, "%s", src->Tag());
		_dbg_assert_msg_(dst->depth.image != VK_NULL_HANDLE, "%s", dst->Tag());

		if (!src->depth.image || !dst->depth.image) {
			// Something has gone wrong, but let's try to stumble along.
			return;
		}
	}

	EndCurRenderStep();

	VKRStep *step = new VKRStep{ VKRStepType::BLIT };
	step->blit.aspectMask = aspectMask;
	step->blit.src = src;
	step->blit.srcRect = srcRect;
	step->blit.dst = dst;
	step->blit.dstRect = dstRect;
	step->blit.filter = filter;
	step->dependencies.insert(src);
	step->tag = tag;
	bool fillsDst = dst && dstRect.offset.x == 0 && dstRect.offset.y == 0 && dstRect.extent.width == dst->width && dstRect.extent.height == dst->height;
	if (!fillsDst)
		step->dependencies.insert(dst);

	steps_.push_back(step);
}

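// Illustrative use (binding value hypothetical): sample the color side of a framebuffer in
// the next draw, using the all-layers view:
//   VkImageView view = BindFramebufferAsTexture(fb, 0, VK_IMAGE_ASPECT_COLOR_BIT, -1);
// Pass layer == -1 for the all-layers view, or a layer index for a single-layer view.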
VkImageView VulkanRenderManager::BindFramebufferAsTexture(VKRFramebuffer *fb, int binding, VkImageAspectFlags aspectBit, int layer) {
	_dbg_assert_(curRenderStep_ != nullptr);
	_dbg_assert_(fb != nullptr);

	// We don't support texturing from stencil, nor from combined depth|stencil (nonsensical).
	_dbg_assert_(aspectBit == VK_IMAGE_ASPECT_COLOR_BIT || aspectBit == VK_IMAGE_ASPECT_DEPTH_BIT);

	// Mark the dependency, check for required transitions, and return the image.

	// Optimization: If possible, use final*Layout to put the texture into the correct layout "early".
	for (int i = (int)steps_.size() - 1; i >= 0; i--) {
		if (steps_[i]->stepType == VKRStepType::RENDER && steps_[i]->render.framebuffer == fb) {
			if (aspectBit == VK_IMAGE_ASPECT_COLOR_BIT) {
				// If this framebuffer was rendered to earlier in this frame, make sure to pre-transition it to the correct layout.
				if (steps_[i]->render.finalColorLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
					steps_[i]->render.finalColorLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				}
				// If we find some other layout, a copy after this is likely involved. It's fine though,
				// we'll just transition it right as we need it and lose a tiny optimization.
			} else if (aspectBit == VK_IMAGE_ASPECT_DEPTH_BIT) {
				// If this framebuffer was rendered to earlier in this frame, make sure to pre-transition it to the correct layout.
				if (steps_[i]->render.finalDepthStencilLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
					steps_[i]->render.finalDepthStencilLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				}
			}  // We don't (yet?) support texturing from stencil images.
			steps_[i]->render.numReads++;
			break;
		}
	}

	// Track dependencies fully.
	curRenderStep_->dependencies.insert(fb);

	// Add this pretransition unless we already have it.
	TransitionRequest rq{ fb, aspectBit, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL };
	curRenderStep_->preTransitions.insert(rq);  // Note that insert avoids inserting duplicates.

	if (layer == -1) {
		return aspectBit == VK_IMAGE_ASPECT_COLOR_BIT ? fb->color.texAllLayersView : fb->depth.texAllLayersView;
	} else {
		return aspectBit == VK_IMAGE_ASPECT_COLOR_BIT ? fb->color.texLayerViews[layer] : fb->depth.texLayerViews[layer];
	}
}

// Called on main thread.
// Sends the collected commands to the render thread. Submit-latency should be
// measured from here, probably.
void VulkanRenderManager::Finish() {
	EndCurRenderStep();

	// Let's do just a bit of cleanup on render commands now.
	// TODO: Should look into removing this.
	for (auto &step : steps_) {
		if (step->stepType == VKRStepType::RENDER) {
			CleanupRenderCommands(&step->commands);
		}
	}

	int curFrame = vulkan_->GetCurFrame();
	FrameData &frameData = frameData_[curFrame];

	if (!postInitBarrier_.empty()) {
		VkCommandBuffer buffer = frameData.GetInitCmd(vulkan_);
		postInitBarrier_.Flush(buffer);
	}

	VLOG("PUSH: Frame[%d]", curFrame);
	VKRRenderThreadTask *task = new VKRRenderThreadTask(VKRRunType::SUBMIT);
	task->frame = curFrame;
	if (useRenderThread_) {
		std::unique_lock<std::mutex> lock(pushMutex_);
		renderThreadQueue_.push(task);
		renderThreadQueue_.back()->steps = std::move(steps_);
		pushCondVar_.notify_one();
	} else {
		// Just do it!
		task->steps = std::move(steps_);
		Run(*task);
		delete task;
	}

	steps_.clear();
}

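// Called on the main thread. Queues a PRESENT task for the render thread (or, when running
// without one, executes it inline), then ends the frame on the VulkanContext.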
void VulkanRenderManager::Present() {
	int curFrame = vulkan_->GetCurFrame();

	VKRRenderThreadTask *task = new VKRRenderThreadTask(VKRRunType::PRESENT);
	task->frame = curFrame;
	if (useRenderThread_) {
		std::unique_lock<std::mutex> lock(pushMutex_);
		renderThreadQueue_.push(task);
		pushCondVar_.notify_one();
	} else {
		// Just do it!
		Run(*task);
		delete task;
	}

	vulkan_->EndFrame();
	insideFrame_ = false;
}

// Called on the render thread.
//
// Can be called again after a VKRRunType::SYNC on the same frame.
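// PRESENT tasks just present (or handle a failed acquire) and return early; SUBMIT and
// SYNC tasks record and submit the queued steps first.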
void VulkanRenderManager::Run(VKRRenderThreadTask &task) {
	FrameData &frameData = frameData_[task.frame];

	if (task.runType == VKRRunType::PRESENT) {
		if (!frameData.skipSwap) {
			VkResult res = frameData.QueuePresent(vulkan_, frameDataShared_);
			frameTimeHistory_[frameData.frameId].queuePresent = time_now_d();
			if (res == VK_ERROR_OUT_OF_DATE_KHR) {
				// We clearly didn't get this in vkAcquireNextImageKHR because of the skipSwap check above.
				// Do the increment.
				outOfDateFrames_++;
			} else if (res == VK_SUBOPTIMAL_KHR) {
				outOfDateFrames_++;
			} else if (res != VK_SUCCESS) {
				_assert_msg_(false, "vkQueuePresentKHR failed! result=%s", VulkanResultToString(res));
			} else {
				// Success
				outOfDateFrames_ = 0;
			}
		} else {
			// We only get here if vkAcquireNextImage returned VK_ERROR_OUT_OF_DATE.
			if (vulkan_->HasRealSwapchain()) {
				outOfDateFrames_++;
			}
			frameData.skipSwap = false;
		}
		return;
	}

	_dbg_assert_(!frameData.hasPresentCommands);

	if (!frameTimeHistory_[frameData.frameId].firstSubmit) {
		frameTimeHistory_[frameData.frameId].firstSubmit = time_now_d();
	}
	frameData.Submit(vulkan_, FrameSubmitType::Pending, frameDataShared_);

	// Flush descriptors.
	double descStart = time_now_d();
	FlushDescriptors(task.frame);
	frameData.profile.descWriteTime = time_now_d() - descStart;

	if (!frameData.hasMainCommands) {
		// Effectively resets both main and present command buffers, since they both live in this pool.
		// We always record main commands first, so we don't need to reset the present command buffer separately.
		vkResetCommandPool(vulkan_->GetDevice(), frameData.cmdPoolMain, 0);

		VkCommandBufferBeginInfo begin{ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
		begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
		VkResult res = vkBeginCommandBuffer(frameData.mainCmd, &begin);
		frameData.hasMainCommands = true;
		_assert_msg_(res == VK_SUCCESS, "vkBeginCommandBuffer failed! result=%s", VulkanResultToString(res));
	}

	queueRunner_.PreprocessSteps(task.steps);
	// Likely during shutdown, happens in headless.
	if (task.steps.empty() && !frameData.hasAcquired)
		frameData.skipSwap = true;
	// queueRunner_.LogSteps(task.steps, false);
	queueRunner_.RunSteps(task.steps, task.frame, frameData, frameDataShared_);

	switch (task.runType) {
	case VKRRunType::SUBMIT:
		frameData.Submit(vulkan_, FrameSubmitType::FinishFrame, frameDataShared_);
		break;

	case VKRRunType::SYNC:
		// The submit will trigger the readbackFence, and also do the wait for it.
		frameData.Submit(vulkan_, FrameSubmitType::Sync, frameDataShared_);

		if (useRenderThread_) {
			std::unique_lock<std::mutex> lock(syncMutex_);
			syncCondVar_.notify_one();
		}

		// At this point the GPU is idle, and all previously enqueued command buffers have
		// been processed, so we can resume filling the command buffers for the current
		// frame. No need to switch to the next frame number, that would just be confusing.
		break;

	default:
		_dbg_assert_(false);
	}

	VLOG("PULL: Finished running frame %d", task.frame);
}

// Called from main thread.
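// Pushes a SYNC task and blocks until the render thread has submitted the queued work and
// signaled syncDone, so the GPU work is complete on return (e.g. for readbacks).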
void VulkanRenderManager::FlushSync() {
	_dbg_assert_(!curRenderStep_);

	if (invalidationCallback_) {
		invalidationCallback_(InvalidationCallbackFlags::COMMAND_BUFFER_STATE);
	}

	int curFrame = vulkan_->GetCurFrame();
	FrameData &frameData = frameData_[curFrame];

	if (!postInitBarrier_.empty()) {
		VkCommandBuffer buffer = frameData.GetInitCmd(vulkan_);
		postInitBarrier_.Flush(buffer);
	}

	if (useRenderThread_) {
		{
			VLOG("PUSH: Frame[%d]", curFrame);
			VKRRenderThreadTask *task = new VKRRenderThreadTask(VKRRunType::SYNC);
			task->frame = curFrame;
			{
				std::unique_lock<std::mutex> lock(pushMutex_);
				renderThreadQueue_.push(task);
				renderThreadQueue_.back()->steps = std::move(steps_);
				pushCondVar_.notify_one();
			}
			steps_.clear();
		}

		{
			std::unique_lock<std::mutex> lock(syncMutex_);
			// Wait for the flush to be hit, since we're syncing.
			while (!frameData.syncDone) {
				VLOG("PUSH: Waiting for frame[%d].syncDone = 1 (sync)", curFrame);
				syncCondVar_.wait(lock);
			}
			frameData.syncDone = false;
		}
	} else {
		VKRRenderThreadTask task(VKRRunType::SYNC);
		task.frame = curFrame;
		task.steps = std::move(steps_);
		Run(task);
		steps_.clear();
	}
}

void VulkanRenderManager::ResetStats() {
	initTimeMs_.Reset();
	totalGPUTimeMs_.Reset();
	renderCPUTimeMs_.Reset();
}

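// Illustrative call (names hypothetical): a layout with one dynamic uniform buffer visible
// to all graphics stages and one combined image/sampler for the fragment stage:
//   BindingType types[] = { BindingType::UNIFORM_BUFFER_DYNAMIC_ALL, BindingType::COMBINED_IMAGE_SAMPLER };
//   VKRPipelineLayout *layout = CreatePipelineLayout(types, ARRAY_SIZE(types), false, "quadLayout");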
VKRPipelineLayout *VulkanRenderManager::CreatePipelineLayout(BindingType *bindingTypes, size_t bindingTypesCount, bool geoShadersEnabled, const char *tag) {
	VKRPipelineLayout *layout = new VKRPipelineLayout();
	layout->SetTag(tag);
	layout->bindingTypesCount = (uint32_t)bindingTypesCount;

	_dbg_assert_(bindingTypesCount <= ARRAY_SIZE(layout->bindingTypes));
	memcpy(layout->bindingTypes, bindingTypes, sizeof(BindingType) * bindingTypesCount);

	VkDescriptorSetLayoutBinding bindings[VKRPipelineLayout::MAX_DESC_SET_BINDINGS];
	for (int i = 0; i < (int)bindingTypesCount; i++) {
		bindings[i].binding = i;
		bindings[i].descriptorCount = 1;
		bindings[i].pImmutableSamplers = nullptr;

		switch (bindingTypes[i]) {
		case BindingType::COMBINED_IMAGE_SAMPLER:
			bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
			bindings[i].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
			break;
		case BindingType::UNIFORM_BUFFER_DYNAMIC_VERTEX:
			bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
			bindings[i].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
			break;
		case BindingType::UNIFORM_BUFFER_DYNAMIC_ALL:
			bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
			bindings[i].stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
			if (geoShadersEnabled) {
				bindings[i].stageFlags |= VK_SHADER_STAGE_GEOMETRY_BIT;
			}
			break;
		case BindingType::STORAGE_BUFFER_VERTEX:
			bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
			bindings[i].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
			break;
		case BindingType::STORAGE_BUFFER_COMPUTE:
			bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
			bindings[i].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
			break;
		case BindingType::STORAGE_IMAGE_COMPUTE:
			bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
			bindings[i].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
			break;
		default:
			_dbg_assert_(false);
			break;
		}
	}

	VkDescriptorSetLayoutCreateInfo dsl = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO };
	dsl.bindingCount = (uint32_t)bindingTypesCount;
	dsl.pBindings = bindings;
	VkResult res = vkCreateDescriptorSetLayout(vulkan_->GetDevice(), &dsl, nullptr, &layout->descriptorSetLayout);
	_assert_(VK_SUCCESS == res && layout->descriptorSetLayout);

	VkPipelineLayoutCreateInfo pl = { VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO };
	VkDescriptorSetLayout setLayouts[1] = { layout->descriptorSetLayout };
	pl.setLayoutCount = ARRAY_SIZE(setLayouts);
	pl.pSetLayouts = setLayouts;
	res = vkCreatePipelineLayout(vulkan_->GetDevice(), &pl, nullptr, &layout->pipelineLayout);
	_assert_(VK_SUCCESS == res && layout->pipelineLayout);

	vulkan_->SetDebugName(layout->descriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, tag);
	vulkan_->SetDebugName(layout->pipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT, tag);

	for (int i = 0; i < VulkanContext::MAX_INFLIGHT_FRAMES; i++) {
		// Some games, like GTA, go beyond 1024 descriptor sets and end up having to resize,
		// but most stay below, so that's where we start.
		layout->frameData[i].pool.Create(vulkan_, bindingTypes, (uint32_t)bindingTypesCount, 1024);
	}

	pipelineLayouts_.push_back(layout);
	return layout;
}

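// Unregisters the layout immediately, but defers destruction of the Vulkan objects through
// the context's delete queue so that frames still in flight can keep using them.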
void VulkanRenderManager::DestroyPipelineLayout(VKRPipelineLayout *layout) {
	for (auto iter = pipelineLayouts_.begin(); iter != pipelineLayouts_.end(); iter++) {
		if (*iter == layout) {
			pipelineLayouts_.erase(iter);
			break;
		}
	}
	vulkan_->Delete().QueueCallback([](VulkanContext *vulkan, void *userdata) {
		VKRPipelineLayout *layout = (VKRPipelineLayout *)userdata;
		for (int i = 0; i < VulkanContext::MAX_INFLIGHT_FRAMES; i++) {
			layout->frameData[i].pool.DestroyImmediately();
		}
		vkDestroyPipelineLayout(vulkan->GetDevice(), layout->pipelineLayout, nullptr);
		vkDestroyDescriptorSetLayout(vulkan->GetDevice(), layout->descriptorSetLayout, nullptr);

		delete layout;
	}, layout);
}

void VulkanRenderManager::FlushDescriptors(int frame) {
	for (auto iter : pipelineLayouts_) {
		iter->FlushDescSets(vulkan_, frame, &frameData_[frame].profile);
	}
}

void VulkanRenderManager::ResetDescriptorLists(int frame) {
	for (auto iter : pipelineLayouts_) {
		VKRPipelineLayout::FrameData &data = iter->frameData[frame];

		data.flushedDescriptors_ = 0;
		data.descSets_.clear();
		data.descData_.clear();
	}
}

VKRPipelineLayout::~VKRPipelineLayout() {
	// The pools must have been destroyed (see DestroyPipelineLayout) before the destructor runs.
	_assert_(frameData[0].pool.IsDestroyed());
}

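// Writes out all descriptor sets queued since the last flush for this frame slot. Sets are
// allocated from the per-frame pool in batches of eight, and consecutive identical
// descriptor payloads are deduplicated by reusing the previously written set.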
void VKRPipelineLayout::FlushDescSets(VulkanContext *vulkan, int frame, QueueProfileContext *profile) {
	_dbg_assert_(frame < VulkanContext::MAX_INFLIGHT_FRAMES);

	FrameData &data = frameData[frame];

	VulkanDescSetPool &pool = data.pool;
	FastVec<PackedDescriptor> &descData = data.descData_;
	FastVec<PendingDescSet> &descSets = data.descSets_;

	pool.Reset();

	VkDescriptorSet setCache[8];
	VkDescriptorSetLayout layoutsForAlloc[ARRAY_SIZE(setCache)];
	for (int i = 0; i < ARRAY_SIZE(setCache); i++) {
		layoutsForAlloc[i] = descriptorSetLayout;
	}
	int setsUsed = ARRAY_SIZE(setCache);  // To allocate immediately.

	// This will write all descriptors.
	// Initially, we just do a simple look-back comparing to the previous descriptor to avoid sequential dupes.
	// In theory, we could multithread this. Gotta be a lot of descriptors for that to be worth it though.

	// Initially, let's do naive single desc set writes.
	VkWriteDescriptorSet writes[MAX_DESC_SET_BINDINGS];
	VkDescriptorImageInfo imageInfo[MAX_DESC_SET_BINDINGS];  // just picked a practical number
	VkDescriptorBufferInfo bufferInfo[MAX_DESC_SET_BINDINGS];

	// Preinitialize fields that won't change.
	for (size_t i = 0; i < ARRAY_SIZE(writes); i++) {
		writes[i].descriptorCount = 1;
		writes[i].dstArrayElement = 0;
		writes[i].pTexelBufferView = nullptr;
		writes[i].pNext = nullptr;
		writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
	}

	size_t start = data.flushedDescriptors_;
	int writeCount = 0, dedupCount = 0;

	for (size_t index = start; index < descSets.size(); index++) {
		auto &d = descSets[index];

		// This is where we look up to see if we already have an identical descriptor previously in the array.
		// We could do a simple custom hash map here that doesn't handle collisions, since those won't matter.
		// Instead, for now we just check history one item backwards. Good enough, it seems.
		if (index > start + 1) {
			if (descSets[index - 1].count == d.count) {
				if (!memcmp(descData.data() + d.offset, descData.data() + descSets[index - 1].offset, d.count * sizeof(PackedDescriptor))) {
					d.set = descSets[index - 1].set;
					dedupCount++;
					continue;
				}
			}
		}

		if (setsUsed < ARRAY_SIZE(setCache)) {
			d.set = setCache[setsUsed++];
		} else {
			// Allocate in small batches.
			bool success = pool.Allocate(setCache, ARRAY_SIZE(setCache), layoutsForAlloc);
			_dbg_assert_(success);
			d.set = setCache[0];
			setsUsed = 1;
		}

		// TODO: Build up bigger batches of writes.
		const PackedDescriptor *data = descData.begin() + d.offset;
		int numWrites = 0;
		int numBuffers = 0;
		int numImages = 0;
		for (int i = 0; i < d.count; i++) {
			if (!data[i].image.view) {  // This automatically also checks for a null buffer, due to the union.
				continue;
			}
			switch (this->bindingTypes[i]) {
			case BindingType::COMBINED_IMAGE_SAMPLER:
				_dbg_assert_(data[i].image.sampler != VK_NULL_HANDLE);
				_dbg_assert_(data[i].image.view != VK_NULL_HANDLE);
				imageInfo[numImages].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				imageInfo[numImages].imageView = data[i].image.view;
				imageInfo[numImages].sampler = data[i].image.sampler;
				writes[numWrites].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
				writes[numWrites].pImageInfo = &imageInfo[numImages];
				writes[numWrites].pBufferInfo = nullptr;
				numImages++;
				break;
			case BindingType::STORAGE_IMAGE_COMPUTE:
				_dbg_assert_(data[i].image.view != VK_NULL_HANDLE);
				imageInfo[numImages].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
				imageInfo[numImages].imageView = data[i].image.view;
				imageInfo[numImages].sampler = VK_NULL_HANDLE;
				writes[numWrites].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
				writes[numWrites].pImageInfo = &imageInfo[numImages];
				writes[numWrites].pBufferInfo = nullptr;
				numImages++;
				break;
			case BindingType::STORAGE_BUFFER_VERTEX:
			case BindingType::STORAGE_BUFFER_COMPUTE:
				_dbg_assert_(data[i].buffer.buffer != VK_NULL_HANDLE);
				bufferInfo[numBuffers].buffer = data[i].buffer.buffer;
				bufferInfo[numBuffers].range = data[i].buffer.range;
				bufferInfo[numBuffers].offset = data[i].buffer.offset;
				writes[numWrites].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
				writes[numWrites].pBufferInfo = &bufferInfo[numBuffers];
				writes[numWrites].pImageInfo = nullptr;
				numBuffers++;
				break;
			case BindingType::UNIFORM_BUFFER_DYNAMIC_ALL:
			case BindingType::UNIFORM_BUFFER_DYNAMIC_VERTEX:
				_dbg_assert_(data[i].buffer.buffer != VK_NULL_HANDLE);
				bufferInfo[numBuffers].buffer = data[i].buffer.buffer;
				bufferInfo[numBuffers].range = data[i].buffer.range;
				bufferInfo[numBuffers].offset = 0;  // Dynamic uniform buffers get their offset at bind time instead.
				writes[numWrites].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
				writes[numWrites].pBufferInfo = &bufferInfo[numBuffers];
				writes[numWrites].pImageInfo = nullptr;
				numBuffers++;
				break;
			}
			writes[numWrites].dstBinding = i;
			writes[numWrites].dstSet = d.set;
			numWrites++;
		}

		vkUpdateDescriptorSets(vulkan->GetDevice(), numWrites, writes, 0, nullptr);

		writeCount++;
	}

	data.flushedDescriptors_ = (int)descSets.size();
	profile->descriptorsWritten += writeCount;
	profile->descriptorsDeduped += dedupCount;
}

void VulkanRenderManager::SanityCheckPassesOnAdd() {
#ifdef _DEBUG
	// Check that we don't have any previous passes that write to the backbuffer; that must ALWAYS be the last one.
	for (int i = 0; i < (int)steps_.size(); i++) {
		if (steps_[i]->stepType == VKRStepType::RENDER) {
			_dbg_assert_msg_(steps_[i]->render.framebuffer != nullptr, "Adding second backbuffer pass? Not good!");
		}
	}
#endif
}