// GitHub Repository: hrydgard/ppsspp
// Path: blob/master/Common/GPU/Vulkan/VulkanRenderManager.h
#pragma once

// VulkanRenderManager takes on the role that a GL driver plays: sequencing and optimizing render passes.
// Only draws and binds are handled here; resource creation and allocations are handled as normal -
// that's the nice thing with Vulkan.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>
#include <queue>

#include "Common/Math/Statistics.h"
#include "Common/Thread/Promise.h"
#include "Common/System/Display.h"
#include "Common/GPU/Vulkan/VulkanContext.h"
#include "Common/GPU/Vulkan/VulkanBarrier.h"
#include "Common/Data/Convert/SmallDataConvert.h"
#include "Common/Data/Collections/FastVec.h"
#include "Common/Math/math_util.h"
#include "Common/GPU/DataFormat.h"
#include "Common/GPU/MiscTypes.h"
#include "Common/GPU/Vulkan/VulkanQueueRunner.h"
#include "Common/GPU/Vulkan/VulkanFramebuffer.h"
#include "Common/GPU/Vulkan/VulkanDescSet.h"
#include "Common/GPU/thin3d.h"

// Forward declaration
VK_DEFINE_HANDLE(VmaAllocation);

struct BoundingRect {
	int x1;
	int y1;
	int x2;
	int y2;

	BoundingRect() {
		Reset();
	}

	void Reset() {
		x1 = 65535;
		y1 = 65535;
		x2 = -65535;
		y2 = -65535;
	}

	bool Empty() const {
		return x2 < 0;
	}

	// Note: width/height are used directly as the right/bottom edges (x2/y2), not added to x/y.
	void SetRect(int x, int y, int width, int height) {
		x1 = x;
		y1 = y;
		x2 = width;
		y2 = height;
	}

	void Apply(const VkRect2D &rect) {
		if (rect.offset.x < x1) x1 = rect.offset.x;
		if (rect.offset.y < y1) y1 = rect.offset.y;
		int rect_x2 = rect.offset.x + rect.extent.width;
		int rect_y2 = rect.offset.y + rect.extent.height;
		if (rect_x2 > x2) x2 = rect_x2;
		if (rect_y2 > y2) y2 = rect_y2;
	}

	VkRect2D ToVkRect2D() const {
		VkRect2D rect;
		rect.offset.x = x1;
		rect.offset.y = y1;
		rect.extent.width = x2 - x1;
		rect.extent.height = y2 - y1;
		return rect;
	}
};
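
// Usage sketch (illustrative, not from the codebase): accumulate scissor rects into a
// conservative render area, then convert it for use in a render pass:
//
//   BoundingRect area;
//   area.Apply(VkRect2D{ { 16, 16 }, { 64, 64 } });
//   area.Apply(VkRect2D{ { 0, 0 }, { 32, 32 } });
//   if (!area.Empty()) {
//       VkRect2D renderArea = area.ToVkRect2D();  // { { 0, 0 }, { 80, 80 } }
//   }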

// All the data needed to create a graphics pipeline.
// TODO: Compress this down greatly.
class VKRGraphicsPipelineDesc : public Draw::RefCountedObject {
public:
	VKRGraphicsPipelineDesc() : Draw::RefCountedObject("VKRGraphicsPipelineDesc") {}

	VkPipelineCache pipelineCache = VK_NULL_HANDLE;
	VkPipelineColorBlendStateCreateInfo cbs{ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO };
	VkPipelineColorBlendAttachmentState blend0{};
	VkPipelineDepthStencilStateCreateInfo dss{ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO };
	VkDynamicState dynamicStates[6]{};
	VkPipelineDynamicStateCreateInfo ds{ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO };
	VkPipelineRasterizationStateCreateInfo rs{ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO };
	VkPipelineRasterizationProvokingVertexStateCreateInfoEXT rs_provoking{ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT };

	// Replaced the ShaderStageInfo with promises here so we can wait for compiles to finish.
	Promise<VkShaderModule> *vertexShader = nullptr;
	Promise<VkShaderModule> *fragmentShader = nullptr;
	Promise<VkShaderModule> *geometryShader = nullptr;

	// These are for pipeline creation failure logging.
	// TODO: Store pointers to the strings instead? Feels iffy, but would probably work.
	std::string vertexShaderSource;
	std::string fragmentShaderSource;
	std::string geometryShaderSource;

	VkPrimitiveTopology topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
	VkVertexInputAttributeDescription attrs[8]{};
	VkVertexInputBindingDescription ibd{};
	VkPipelineVertexInputStateCreateInfo vis{ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO };
	VkPipelineViewportStateCreateInfo views{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO };

	VKRPipelineLayout *pipelineLayout = nullptr;

	// Does not include the render pass type; that's passed in separately since the
	// desc is persistent.
	RPKey rpKey{};
};

// Wrapped pipeline. Does own the desc!
struct VKRGraphicsPipeline {
	VKRGraphicsPipeline(PipelineFlags flags, const char *tag) : flags_(flags), tag_(tag) {}
	~VKRGraphicsPipeline();

	bool Create(VulkanContext *vulkan, VkRenderPass compatibleRenderPass, RenderPassType rpType, VkSampleCountFlagBits sampleCount, double scheduleTime, int countToCompile);
	void DestroyVariants(VulkanContext *vulkan, bool msaaOnly);

	// This deletes the whole VKRGraphicsPipeline; you must drop your last pointer to it when doing this.
	void QueueForDeletion(VulkanContext *vulkan);

	// This blocks until any background compiles are finished.
	// Used during game shutdown, before we clear out the shaders that these compiles depend on.
	void BlockUntilCompiled();

	u32 GetVariantsBitmask() const;

	void LogCreationFailure() const;

	VKRGraphicsPipelineDesc *desc = nullptr;
	Promise<VkPipeline> *pipeline[(size_t)RenderPassType::TYPE_COUNT]{};
	std::mutex mutex_;  // protects the pipeline array

	VkSampleCountFlagBits SampleCount() const { return sampleCount_; }

	const char *Tag() const { return tag_.c_str(); }
private:
	void DestroyVariantsInstant(VkDevice device);

	std::string tag_;
	PipelineFlags flags_;
	VkSampleCountFlagBits sampleCount_ = VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM;
};

struct CompileQueueEntry {
	CompileQueueEntry(VKRGraphicsPipeline *p, VkRenderPass _compatibleRenderPass, RenderPassType _renderPassType, VkSampleCountFlagBits _sampleCount)
		: type(Type::GRAPHICS), graphics(p), compatibleRenderPass(_compatibleRenderPass), renderPassType(_renderPassType), sampleCount(_sampleCount) {}
	enum class Type {
		GRAPHICS,
	};
	Type type;
	VkRenderPass compatibleRenderPass;
	RenderPassType renderPassType;
	VKRGraphicsPipeline *graphics = nullptr;
	VkSampleCountFlagBits sampleCount;
};

// Pending descriptor sets.
// TODO: Sort these by VKRPipelineLayout to avoid storing it for each element.
struct PendingDescSet {
	int offset;  // probably enough with a u16.
	u8 count;
	VkDescriptorSet set;
};

struct PackedDescriptor {
	union {
		struct {
			VkImageView view;
			VkSampler sampler;
		} image;
		struct {
			VkBuffer buffer;
			uint32_t range;
			uint32_t offset;
		} buffer;
#if false
		struct {
			VkBuffer buffer;
			uint64_t range;  // write range and a zero offset in one operation with this.
		} buffer_zero_offset;
#endif
	};
};

// Note that we only support a single descriptor set, for compatibility with some ancient devices.
// We should probably give that up eventually.
struct VKRPipelineLayout {
	~VKRPipelineLayout();

	enum { MAX_DESC_SET_BINDINGS = 10 };
	BindingType bindingTypes[MAX_DESC_SET_BINDINGS];

	uint32_t bindingTypesCount = 0;
	VkPipelineLayout pipelineLayout = VK_NULL_HANDLE;
	VkDescriptorSetLayout descriptorSetLayout = VK_NULL_HANDLE;  // only support 1 for now.
	int pushConstSize = 0;
	const char *tag = nullptr;

	struct FrameData {
		FrameData() : pool("N/A", true) {}

		VulkanDescSetPool pool;
		FastVec<PackedDescriptor> descData_;
		FastVec<PendingDescSet> descSets_;
		// TODO: We should be able to get away with a single descData_/descSets_ and then send it along,
		// but it's easier to just segregate by frame id.
		int flushedDescriptors_ = 0;
	};

	FrameData frameData[VulkanContext::MAX_INFLIGHT_FRAMES];

	void FlushDescSets(VulkanContext *vulkan, int frame, QueueProfileContext *profile);
	void SetTag(const char *tag) {
		this->tag = tag;
		for (int i = 0; i < ARRAY_SIZE(frameData); i++) {
			frameData[i].pool.SetTag(tag);
		}
	}
};

class VulkanRenderManager {
public:
	VulkanRenderManager(VulkanContext *vulkan, bool useThread, HistoryBuffer<FrameTimeData, FRAME_TIME_HISTORY_LENGTH> &frameTimeHistory);
	~VulkanRenderManager();

	// Makes sure that the GPU has caught up enough that we can start writing buffers of this frame again.
	void BeginFrame(bool enableProfiling, bool enableLogProfiler);
	// These can run on a different thread!
	void Finish();
	void Present();
	void CheckNothingPending();

	void SetInvalidationCallback(InvalidationCallback callback) {
		invalidationCallback_ = callback;
	}

	// This starts a new step containing a render pass (unless it can be trivially merged into the previous one, which is pretty common).
	//
	// After a "CopyFramebuffer" or the other functions that start "steps", you need to call this before
	// making any new render state changes or draw calls.
	//
	// The following dynamic state needs to be reset by the caller after calling this (and will thus not safely carry over from
	// the previous one):
	// * Viewport/Scissor
	// * Stencil parameters
	// * Blend color
	//
	// (Most other state is directly decided by your choice of pipeline and descriptor set, so it's not handled here.)
	//
	// It can be useful to use GetCurrentStepId() to figure out when you need to send all this state again, if you're
	// not keeping track of your calls to this function on your own.
	void BindFramebufferAsRenderTarget(VKRFramebuffer *fb, VKRRenderPassLoadAction color, VKRRenderPassLoadAction depth, VKRRenderPassLoadAction stencil, uint32_t clearColor, float clearDepth, uint8_t clearStencil, const char *tag);
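	// Usage sketch (illustrative; "fb" is hypothetical, and the CLEAR/KEEP load-action values are
	// assumptions - check VKRRenderPassLoadAction for the actual enum): start a pass that clears
	// color but keeps depth/stencil:
	//
	//   renderManager->BindFramebufferAsRenderTarget(fb,
	//       VKRRenderPassLoadAction::CLEAR,  // color
	//       VKRRenderPassLoadAction::KEEP,   // depth
	//       VKRRenderPassLoadAction::KEEP,   // stencil
	//       0xFF000000, 1.0f, 0, "ExamplePass");
	//   // Per the comment above: viewport, scissor, stencil params and blend color must now be re-sent.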

	// Returns an ImageView corresponding to a framebuffer. Is called BindFramebufferAsTexture to maintain a similar interface
	// to the other backends, even though there's no actual binding happening here.
	// For layer, we use the same convention as thin3d, where layer = -1 means all layers together. For texturing, that means that you
	// get an array texture view.
	VkImageView BindFramebufferAsTexture(VKRFramebuffer *fb, int binding, VkImageAspectFlags aspectBits, int layer);

	bool CopyFramebufferToMemory(VKRFramebuffer *src, VkImageAspectFlags aspectBits, int x, int y, int w, int h, Draw::DataFormat destFormat, uint8_t *pixels, int pixelStride, Draw::ReadbackMode mode, const char *tag);
	void CopyImageToMemorySync(VkImage image, int mipLevel, int x, int y, int w, int h, Draw::DataFormat destFormat, uint8_t *pixels, int pixelStride, const char *tag);

	void CopyFramebuffer(VKRFramebuffer *src, VkRect2D srcRect, VKRFramebuffer *dst, VkOffset2D dstPos, VkImageAspectFlags aspectMask, const char *tag);
	void BlitFramebuffer(VKRFramebuffer *src, VkRect2D srcRect, VKRFramebuffer *dst, VkRect2D dstRect, VkImageAspectFlags aspectMask, VkFilter filter, const char *tag);

	// Deferred creation, like in GL. Unlike GL though, the purpose is to allow background creation, avoiding
	// stalls on the emulation thread as much as possible.
	// We delay creating pipelines until the end of the current render pass, so we can create the right type immediately.
	// Unless a variantBitmask is passed in, in which case we can just go ahead.
	// WARNING: desc must stick around during the lifetime of the pipeline! It's not enough to build it on the stack and drop it.
	VKRGraphicsPipeline *CreateGraphicsPipeline(VKRGraphicsPipelineDesc *desc, PipelineFlags pipelineFlags, uint32_t variantBitmask, VkSampleCountFlagBits sampleCount, bool cacheLoad, const char *tag);

	VKRPipelineLayout *CreatePipelineLayout(BindingType *bindingTypes, size_t bindingCount, bool geoShadersEnabled, const char *tag);
	void DestroyPipelineLayout(VKRPipelineLayout *pipelineLayout);
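	// Usage sketch (illustrative; the BindingType values shown are assumptions - see VulkanDescSet.h
	// for the real ones):
	//
	//   BindingType bindings[2] = {
	//       BindingType::COMBINED_IMAGE_SAMPLER,
	//       BindingType::UNIFORM_BUFFER_DYNAMIC_ALL,
	//   };
	//   VKRPipelineLayout *layout = renderManager->CreatePipelineLayout(bindings, ARRAY_SIZE(bindings), false, "ExampleLayout");
	//   // ...later, at shutdown:
	//   renderManager->DestroyPipelineLayout(layout);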

	void ReportBadStateForDraw();

	int WaitForPipelines();

	void NudgeCompilerThread() {
		compileQueueMutex_.lock();
		compileCond_.notify_one();
		compileQueueMutex_.unlock();
	}

	void AssertInRenderPass() const {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
	}

	// This is the first call in a draw operation. Instead of asserting like we used to, you can now check the
	// return value and skip the draw if we're in a bad state. In that case, call ReportBadStateForDraw().
	// The old assert wasn't very helpful in figuring out what caused it anyway...
	bool BindPipeline(VKRGraphicsPipeline *pipeline, PipelineFlags flags, VKRPipelineLayout *pipelineLayout) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER && pipeline != nullptr);
		if (!curRenderStep_ || curRenderStep_->stepType != VKRStepType::RENDER) {
			return false;
		}
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::BIND_GRAPHICS_PIPELINE;
		pipelinesToCheck_.push_back(pipeline);
		data.graphics_pipeline.pipeline = pipeline;
		data.graphics_pipeline.pipelineLayout = pipelineLayout;
		// This can be used to debug cases where depth/stencil rendering is used on color-only framebuffers.
		// if ((flags & PipelineFlags::USES_DEPTH_STENCIL) && curRenderStep_->render.framebuffer && !curRenderStep_->render.framebuffer->HasDepth()) {
		// 	DebugBreak();
		// }
		curPipelineFlags_ |= flags;
		curPipelineLayout_ = pipelineLayout;
		return true;
	}

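	// Usage sketch (illustrative; "pipeline", "pipelineFlags" and "layout" are hypothetical locals):
	//
	//   if (!BindPipeline(pipeline, pipelineFlags, layout)) {
	//       ReportBadStateForDraw();
	//       return;  // skip the draw
	//   }
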
	void SetViewport(const VkViewport &vp) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
		_dbg_assert_((int)vp.width >= 0);
		_dbg_assert_((int)vp.height >= 0);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::VIEWPORT;
		data.viewport.vp.x = vp.x;
		data.viewport.vp.y = vp.y;
		data.viewport.vp.width = vp.width;
		data.viewport.vp.height = vp.height;
		// We can't allow values outside this range unless we use VK_EXT_depth_range_unrestricted.
		// Sometimes state mapping produces 65536/65535, which is slightly outside.
		// TODO: This should be fixed at the source.
		data.viewport.vp.minDepth = clamp_value(vp.minDepth, 0.0f, 1.0f);
		data.viewport.vp.maxDepth = clamp_value(vp.maxDepth, 0.0f, 1.0f);
		curStepHasViewport_ = true;
	}

	// It's OK to set the scissor outside the valid range - the function will automatically clip.
	void SetScissor(int x, int y, int width, int height) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);

		if (x < 0) {
			width += x;  // since x is negative, this shrinks width.
			x = 0;
		}
		if (y < 0) {
			height += y;
			y = 0;
		}

		if (x + width > curWidth_) {
			width = curWidth_ - x;
		}
		if (y + height > curHeight_) {
			height = curHeight_ - y;
		}

		// Check validity. If we detect an invalid scissor rectangle, we just put a 1x1 rectangle in the upper left corner.
		// TODO: If any of the dimensions are now zero or negative, we should probably flip a flag and skip draws instead.
		if (width < 0 || height < 0 || x >= curWidth_ || y >= curHeight_) {
			x = 0;
			y = 0;
			width = 1;
			height = 1;
		}

		VkRect2D rc;
		rc.offset.x = x;
		rc.offset.y = y;
		rc.extent.width = width;
		rc.extent.height = height;

		curRenderArea_.Apply(rc);

		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::SCISSOR;
		data.scissor.scissor = rc;
		curStepHasScissor_ = true;
	}

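	// Usage sketch (illustrative): after starting a step, re-send the dynamic state before drawing:
	//
	//   VkViewport vp{ 0.0f, 0.0f, 480.0f, 272.0f, 0.0f, 1.0f };
	//   SetViewport(vp);
	//   SetScissor(0, 0, 480, 272);
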
	void SetStencilParams(uint8_t writeMask, uint8_t compareMask, uint8_t refValue) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::STENCIL;
		data.stencil.stencilWriteMask = writeMask;
		data.stencil.stencilCompareMask = compareMask;
		data.stencil.stencilRef = refValue;
	}

	void SetBlendFactor(uint32_t color) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::BLEND;
		data.blendColor.color = color;
	}

	void PushConstants(VkShaderStageFlags stages, int offset, int size, void *constants) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
		_dbg_assert_(size + offset < 40);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::PUSH_CONSTANTS;
		data.push.stages = stages;
		data.push.offset = offset;
		data.push.size = size;
		memcpy(data.push.data, constants, size);
	}

	void Clear(uint32_t clearColor, float clearZ, int clearStencil, int clearMask);

	// Cheaply mark that we don't care about the contents of a surface at the start of the current render pass.
	// This sets the corresponding load-op of the current render pass to DONT_CARE.
	// Useful when we don't know at bind-time whether we will overwrite the surface or not.
	void SetLoadDontCare(VkImageAspectFlags aspects) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
		if (aspects & VK_IMAGE_ASPECT_COLOR_BIT)
			curRenderStep_->render.colorLoad = VKRRenderPassLoadAction::DONT_CARE;
		if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
			curRenderStep_->render.depthLoad = VKRRenderPassLoadAction::DONT_CARE;
		if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
			curRenderStep_->render.stencilLoad = VKRRenderPassLoadAction::DONT_CARE;
	}

	// Cheaply mark that we don't care about the contents of a surface at the end of the current render pass.
	// This sets the corresponding store-op of the current render pass to DONT_CARE.
	void SetStoreDontCare(VkImageAspectFlags aspects) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);
		if (aspects & VK_IMAGE_ASPECT_COLOR_BIT)
			curRenderStep_->render.colorStore = VKRRenderPassStoreAction::DONT_CARE;
		if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
			curRenderStep_->render.depthStore = VKRRenderPassStoreAction::DONT_CARE;
		if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
			curRenderStep_->render.stencilStore = VKRRenderPassStoreAction::DONT_CARE;
	}
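
	// Usage sketch (illustrative): a pass that fully overwrites color and never needs depth/stencil
	// afterwards can skip both the color load and the depth/stencil store:
	//
	//   renderManager->SetLoadDontCare(VK_IMAGE_ASPECT_COLOR_BIT);
	//   renderManager->SetStoreDontCare(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);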

	// Descriptors will match the current pipeline layout, set by the last call to BindPipeline.
	// Count is the count of void*s: two are needed for COMBINED_IMAGE_SAMPLER, everything else needs a single one.
	// The goal is to keep this function very small and fast, and do the expensive work on the render thread or
	// another thread.
	PackedDescriptor *PushDescriptorSet(int count, int *descSetIndex) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER);

		int curFrame = vulkan_->GetCurFrame();

		VKRPipelineLayout::FrameData &data = curPipelineLayout_->frameData[curFrame];

		size_t offset = data.descData_.size();
		PackedDescriptor *retval = data.descData_.extend_uninitialized(count);

		int setIndex = (int)data.descSets_.size();
		PendingDescSet &descSet = data.descSets_.push_uninitialized();
		descSet.offset = (uint32_t)offset;
		descSet.count = count;
		// descSet.set = VK_NULL_HANDLE; // to be filled in
		*descSetIndex = setIndex;
		return retval;
	}
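
	// Usage sketch (illustrative; assumes a pipeline layout with a single uniform buffer binding,
	// which needs one slot per the comment above; "ubo" and "uboSize" are hypothetical):
	//
	//   int descSetIndex;
	//   PackedDescriptor *desc = PushDescriptorSet(1, &descSetIndex);
	//   desc[0].buffer.buffer = ubo;
	//   desc[0].buffer.offset = 0;       // the dynamic offset is supplied per-draw instead
	//   desc[0].buffer.range = uboSize;
	//   // descSetIndex is then passed to Draw()/DrawIndexed() below.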

	void Draw(int descSetIndex, int numUboOffsets, const uint32_t *uboOffsets, VkBuffer vbuffer, int voffset, int count, int offset = 0) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER && curStepHasViewport_ && curStepHasScissor_);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::DRAW;
		data.draw.count = count;
		data.draw.offset = offset;
		data.draw.descSetIndex = descSetIndex;
		data.draw.vbuffer = vbuffer;
		data.draw.voffset = voffset;
		data.draw.numUboOffsets = numUboOffsets;
		_dbg_assert_(numUboOffsets <= ARRAY_SIZE(data.draw.uboOffsets));
		for (int i = 0; i < numUboOffsets; i++)
			data.draw.uboOffsets[i] = uboOffsets[i];
		curRenderStep_->render.numDraws++;
	}

	void DrawIndexed(int descSetIndex, int numUboOffsets, const uint32_t *uboOffsets, VkBuffer vbuffer, int voffset, VkBuffer ibuffer, int ioffset, int count, int numInstances) {
		_dbg_assert_(curRenderStep_ && curRenderStep_->stepType == VKRStepType::RENDER && curStepHasViewport_ && curStepHasScissor_);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::DRAW_INDEXED;
		data.drawIndexed.count = count;
		data.drawIndexed.instances = numInstances;
		data.drawIndexed.descSetIndex = descSetIndex;
		data.drawIndexed.vbuffer = vbuffer;
		data.drawIndexed.voffset = voffset;
		data.drawIndexed.ibuffer = ibuffer;
		data.drawIndexed.ioffset = ioffset;
		data.drawIndexed.numUboOffsets = numUboOffsets;
		_dbg_assert_(numUboOffsets <= ARRAY_SIZE(data.drawIndexed.uboOffsets));
		for (int i = 0; i < numUboOffsets; i++)
			data.drawIndexed.uboOffsets[i] = uboOffsets[i];
		curRenderStep_->render.numDraws++;
	}
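
	// Usage sketch (illustrative; handles and offsets are hypothetical): a complete recorded draw,
	// after BindPipeline(), SetViewport() and SetScissor() have been called for this step:
	//
	//   uint32_t uboOffset = frameUboOffset;  // dynamic UBO offset for this draw
	//   DrawIndexed(descSetIndex, 1, &uboOffset, vbuf, 0, ibuf, 0, indexCount, 1);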

	// These can be useful both when inspecting in RenderDoc, and when manually inspecting recorded commands
	// in the debugger.
	void DebugAnnotate(const char *annotation) {
		_dbg_assert_(curRenderStep_);
		VkRenderData &data = curRenderStep_->commands.push_uninitialized();
		data.cmd = VKRRenderCommand::DEBUG_ANNOTATION;
		data.debugAnnotation.annotation = annotation;
	}

	VkCommandBuffer GetInitCmd();

	bool CreateBackbuffers();
	void DestroyBackbuffers();

	bool HasBackbuffers() {
		return queueRunner_.HasBackbuffers();
	}

	void SetInflightFrames(int f) {
		newInflightFrames_ = f < 1 || f > VulkanContext::MAX_INFLIGHT_FRAMES ? VulkanContext::MAX_INFLIGHT_FRAMES : f;
	}

	VulkanContext *GetVulkanContext() {
		return vulkan_;
	}

	// Be careful with this. Only meant to be used for fetching render passes for shader cache initialization.
	VulkanQueueRunner *GetQueueRunner() {
		return &queueRunner_;
	}

	std::string GetGpuProfileString() const {
		return frameData_[vulkan_->GetCurFrame()].profile.profileSummary;
	}

	bool NeedsSwapchainRecreate() const {
		// Accepting a few of these makes shutdown simpler.
		return outOfDateFrames_ > VulkanContext::MAX_INFLIGHT_FRAMES;
	}

	VulkanBarrierBatch &PostInitBarrier() {
		return postInitBarrier_;
	}

	void ResetStats();

	void StartThreads();
	void StopThreads();

	size_t GetNumSteps() const {
		return steps_.size();
	}

private:
	void EndCurRenderStep();

	void RenderThreadFunc();
	void CompileThreadFunc();

	void Run(VKRRenderThreadTask &task);

	// Bad for performance, but sometimes necessary for synchronous CPU readbacks (screenshots and whatnot).
	void FlushSync();

	void PresentWaitThreadFunc();
	void PollPresentTiming();

	void ResetDescriptorLists(int frame);
	void FlushDescriptors(int frame);

	void SanityCheckPassesOnAdd();
	bool CreateSwapchainViewsAndDepth(VkCommandBuffer cmdInit, VulkanBarrierBatch *barriers, FrameDataShared &frameDataShared);

	FrameDataShared frameDataShared_;

	FrameData frameData_[VulkanContext::MAX_INFLIGHT_FRAMES];
	int newInflightFrames_ = -1;
	int inflightFramesAtStart_ = 0;

	int outOfDateFrames_ = 0;

	// Submission time state

	// Note: These are raw backbuffer-sized. Rotated.
	int curWidthRaw_ = -1;
	int curHeightRaw_ = -1;

	// Pre-rotation (as you'd expect).
	int curWidth_ = -1;
	int curHeight_ = -1;

	bool insideFrame_ = false;
	// Probably doesn't need to be atomic.
	std::atomic<bool> runCompileThread_{};

	bool useRenderThread_ = true;
	bool measurePresentTime_ = false;

	// This is the offset within this frame, in case of a mid-frame sync.
	VKRStep *curRenderStep_ = nullptr;
	bool curStepHasViewport_ = false;
	bool curStepHasScissor_ = false;
	PipelineFlags curPipelineFlags_{};
	BoundingRect curRenderArea_;

	std::vector<VKRStep *> steps_;

	// Execution time state
	VulkanContext *vulkan_;
	std::thread renderThread_;
	VulkanQueueRunner queueRunner_;

	// For pushing data on the queue.
	std::mutex pushMutex_;
	std::condition_variable pushCondVar_;

	std::queue<VKRRenderThreadTask *> renderThreadQueue_;

	// For readbacks and other reasons we need to sync with the render thread.
	std::mutex syncMutex_;
	std::condition_variable syncCondVar_;

	// Shader compilation thread, to compile while emulating the rest of the frame.
	// Only one right now, but we could use more.
	std::thread compileThread_;
	// Sync
	std::condition_variable compileCond_;
	std::mutex compileQueueMutex_;
	std::vector<CompileQueueEntry> compileQueue_;

	// Thread for measuring presentation delay.
	std::thread presentWaitThread_;

	// Pipelines to check and possibly create at the end of the current render pass.
	std::vector<VKRGraphicsPipeline *> pipelinesToCheck_;

	// For nicer output in the little internal GPU profiler.
	SimpleStat initTimeMs_;
	SimpleStat totalGPUTimeMs_;
	SimpleStat renderCPUTimeMs_;
	SimpleStat descUpdateTimeMs_;

	VulkanBarrierBatch postInitBarrier_;

	std::function<void(InvalidationCallbackFlags)> invalidationCallback_;

	uint64_t frameIdGen_ = FRAME_TIME_HISTORY_LENGTH;
	HistoryBuffer<FrameTimeData, FRAME_TIME_HISTORY_LENGTH> &frameTimeHistory_;

	VKRPipelineLayout *curPipelineLayout_ = nullptr;
	std::vector<VKRPipelineLayout *> pipelineLayouts_;
};