Source listing: godotengine/godot — servers/rendering/rendering_device.h (master branch), viewed through the CoCalc GitHub share viewer.
1
/**************************************************************************/
2
/* rendering_device.h */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#pragma once
32
33
#include "core/object/worker_thread_pool.h"
34
#include "core/os/condition_variable.h"
35
#include "core/os/thread_safe.h"
36
#include "core/templates/local_vector.h"
37
#include "core/templates/rid_owner.h"
38
#include "core/variant/typed_array.h"
39
#include "servers/display_server.h"
40
#include "servers/rendering/rendering_device_commons.h"
41
#include "servers/rendering/rendering_device_driver.h"
42
#include "servers/rendering/rendering_device_graph.h"
43
44
class RDTextureFormat;
45
class RDTextureView;
46
class RDAttachmentFormat;
47
class RDSamplerState;
48
class RDVertexAttribute;
49
class RDShaderSource;
50
class RDShaderSPIRV;
51
class RDUniform;
52
class RDPipelineRasterizationState;
53
class RDPipelineMultisampleState;
54
class RDPipelineDepthStencilState;
55
class RDPipelineColorBlendState;
56
class RDFramebufferPass;
57
class RDPipelineSpecializationConstant;
58
59
class RenderingDevice : public RenderingDeviceCommons {
60
GDCLASS(RenderingDevice, Object)
61
62
_THREAD_SAFE_CLASS_
63
64
private:
65
Thread::ID render_thread_id;
66
67
public:
68
typedef int64_t DrawListID;
69
typedef int64_t ComputeListID;
70
71
typedef void (*InvalidationCallback)(void *);
72
73
private:
74
static RenderingDevice *singleton;
75
76
RenderingContextDriver *context = nullptr;
77
RenderingDeviceDriver *driver = nullptr;
78
RenderingContextDriver::Device device;
79
80
bool local_device_processing = false;
81
bool is_main_instance = false;
82
83
protected:
84
static void _bind_methods();
85
86
#ifndef DISABLE_DEPRECATED
87
RID _shader_create_from_bytecode_bind_compat_79606(const Vector<uint8_t> &p_shader_binary);
88
RID _texture_create_from_extension_compat_105570(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_usage, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers);
89
static void _bind_compatibility_methods();
90
#endif
91
92
/***************************/
93
/**** ID INFRASTRUCTURE ****/
94
/***************************/
95
public:
96
//base numeric ID for all types
97
enum {
98
INVALID_FORMAT_ID = -1
99
};
100
101
// Tags packed into the non-RID 64-bit IDs handed out by this class
// (framebuffer formats, vertex formats, draw/compute lists). The type tag
// lives in the bits at and above ID_BASE_SHIFT.
enum IDType {
	ID_TYPE_FRAMEBUFFER_FORMAT,
	ID_TYPE_VERTEX_FORMAT,
	ID_TYPE_DRAW_LIST,
	ID_TYPE_COMPUTE_LIST = 4,
	ID_TYPE_MAX,
	ID_BASE_SHIFT = 58, // 5 bits for ID types.
	ID_MASK = (ID_BASE_SHIFT - 1), // NOTE(review): this evaluates to 57, not a bit mask of the low 58 bits — verify against the sites that use ID_MASK before relying on it as a mask.
};
110
111
private:
112
HashMap<RID, HashSet<RID>> dependency_map; // IDs to IDs that depend on it.
113
HashMap<RID, HashSet<RID>> reverse_dependency_map; // Same as above, but in reverse.
114
115
void _add_dependency(RID p_id, RID p_depends_on);
116
void _free_dependencies(RID p_id);
117
118
private:
119
/***************************/
120
/**** BUFFER MANAGEMENT ****/
121
/***************************/
122
123
// These are temporary buffers on CPU memory that hold
124
// the information until the CPU fetches it and places it
125
// either on GPU buffers, or images (textures). It ensures
126
// updates are properly synchronized with whatever the
127
// GPU is doing.
128
//
129
// The logic here is as follows, only 3 of these
130
// blocks are created at the beginning (one per frame)
131
// they can each belong to a frame (assigned to current when
132
// used) and they can only be reused after the same frame is
133
// recycled.
134
//
135
// When CPU requires to allocate more than what is available,
136
// more of these buffers are created. If a limit is reached,
137
// then a fence will ensure will wait for blocks allocated
138
// in previous frames are processed. If that fails, then
139
// another fence will ensure everything pending for the current
140
// frame is processed (effectively stalling).
141
//
142
// See the comments in the code to understand better how it works.
143
144
// Action the caller must perform before a staging allocation can be
// satisfied (returned by _staging_buffer_allocate and executed by
// _staging_buffer_execute_required_action). See the block-recycling
// explanation above.
enum StagingRequiredAction {
	STAGING_REQUIRED_ACTION_NONE,
	STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL,
	STAGING_REQUIRED_ACTION_STALL_PREVIOUS,
};
149
150
// One CPU-visible staging block; blocks belong to a frame and can only be
// reused once that frame is recycled (see the explanation above).
struct StagingBufferBlock {
	RDD::BufferID driver_id;
	uint64_t frame_used = 0; // Frame this block was last assigned to.
	uint32_t fill_amount = 0; // Presumably bytes already handed out from this block — confirm against _staging_buffer_allocate.
};
155
156
// State for one staging pool; two instances exist below, one for uploads
// and one for downloads.
struct StagingBuffers {
	Vector<StagingBufferBlock> blocks;
	int current = 0; // Index of the block currently being filled.
	uint32_t block_size = 0;
	uint64_t max_size = 0; // Total budget; per the notes above, hitting the limit forces fence waits/stalls.
	BitField<RDD::BufferUsageBits> usage_bits = {};
	bool used = false;
};
164
165
Error _staging_buffer_allocate(StagingBuffers &p_staging_buffers, uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, StagingRequiredAction &r_required_action, bool p_can_segment = true);
166
void _staging_buffer_execute_required_action(StagingBuffers &p_staging_buffers, StagingRequiredAction p_required_action);
167
Error _insert_staging_block(StagingBuffers &p_staging_buffers);
168
169
StagingBuffers upload_staging_buffers;
170
StagingBuffers download_staging_buffers;
171
172
// Frontend record for a GPU buffer; shared by the uniform/storage/texel
// buffer owners below and extended by IndexBuffer.
struct Buffer {
	RDD::BufferID driver_id; // Driver-level buffer handle.
	uint32_t size = 0; // Size in bytes.
	BitField<RDD::BufferUsageBits> usage = {};
	RDG::ResourceTracker *draw_tracker = nullptr; // Render-graph dependency tracker.
	int32_t transfer_worker_index = -1; // Presumably -1 means no transfer worker owns a pending operation — confirm against the transfer worker code.
	uint64_t transfer_worker_operation = 0;
};
180
181
Buffer *_get_buffer_from_owner(RID p_buffer);
182
Error _buffer_initialize(Buffer *p_buffer, Span<uint8_t> p_data, uint32_t p_required_align = 32);
183
184
void update_perf_report();
185
// Flag for batching descriptor sets.
186
bool descriptor_set_batching = true;
187
// When true, the final draw call that copies our offscreen result into the Swapchain is put into its
188
// own cmd buffer, so that the whole rendering can start early instead of having to wait for the
189
// swapchain semaphore to be signaled (which causes bubbles).
190
bool split_swapchain_into_its_own_cmd_buffer = true;
191
uint32_t gpu_copy_count = 0;
192
uint32_t copy_bytes_count = 0;
193
uint32_t prev_gpu_copy_count = 0;
194
uint32_t prev_copy_bytes_count = 0;
195
196
RID_Owner<Buffer, true> uniform_buffer_owner;
197
RID_Owner<Buffer, true> storage_buffer_owner;
198
RID_Owner<Buffer, true> texture_buffer_owner;
199
200
// Bookkeeping for an asynchronous buffer readback started by
// buffer_get_data_async(); `callback` is invoked once the data is available.
struct BufferGetDataRequest {
	uint32_t frame_local_index = 0;
	uint32_t frame_local_count = 0;
	Callable callback;
	uint32_t size = 0; // Requested size in bytes.
};
206
207
public:
208
Error buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size);
209
Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data);
210
Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size);
211
Vector<uint8_t> buffer_get_data(RID p_buffer, uint32_t p_offset = 0, uint32_t p_size = 0); // This causes stall, only use to retrieve large buffers for saving.
212
Error buffer_get_data_async(RID p_buffer, const Callable &p_callback, uint32_t p_offset = 0, uint32_t p_size = 0);
213
uint64_t buffer_get_device_address(RID p_buffer);
214
215
private:
216
/******************/
217
/**** CALLBACK ****/
218
/******************/
219
220
public:
221
enum CallbackResourceType {
222
CALLBACK_RESOURCE_TYPE_TEXTURE,
223
CALLBACK_RESOURCE_TYPE_BUFFER,
224
};
225
226
enum CallbackResourceUsage {
227
CALLBACK_RESOURCE_USAGE_NONE,
228
CALLBACK_RESOURCE_USAGE_COPY_FROM,
229
CALLBACK_RESOURCE_USAGE_COPY_TO,
230
CALLBACK_RESOURCE_USAGE_RESOLVE_FROM,
231
CALLBACK_RESOURCE_USAGE_RESOLVE_TO,
232
CALLBACK_RESOURCE_USAGE_UNIFORM_BUFFER_READ,
233
CALLBACK_RESOURCE_USAGE_INDIRECT_BUFFER_READ,
234
CALLBACK_RESOURCE_USAGE_TEXTURE_BUFFER_READ,
235
CALLBACK_RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE,
236
CALLBACK_RESOURCE_USAGE_STORAGE_BUFFER_READ,
237
CALLBACK_RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE,
238
CALLBACK_RESOURCE_USAGE_VERTEX_BUFFER_READ,
239
CALLBACK_RESOURCE_USAGE_INDEX_BUFFER_READ,
240
CALLBACK_RESOURCE_USAGE_TEXTURE_SAMPLE,
241
CALLBACK_RESOURCE_USAGE_STORAGE_IMAGE_READ,
242
CALLBACK_RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE,
243
CALLBACK_RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE,
244
CALLBACK_RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE,
245
CALLBACK_RESOURCE_USAGE_ATTACHMENT_FRAGMENT_SHADING_RATE_READ,
246
CALLBACK_RESOURCE_USAGE_ATTACHMENT_FRAGMENT_DENSITY_MAP_READ,
247
CALLBACK_RESOURCE_USAGE_GENERAL,
248
CALLBACK_RESOURCE_USAGE_MAX
249
};
250
251
// Resource reference passed to driver_callback_add() so the frontend knows
// what kind of resource a driver callback touches and how it uses it.
struct CallbackResource {
	RID rid;
	CallbackResourceType type = CALLBACK_RESOURCE_TYPE_TEXTURE;
	CallbackResourceUsage usage = CALLBACK_RESOURCE_USAGE_NONE;
};
256
257
Error driver_callback_add(RDD::DriverCallback p_callback, void *p_userdata, VectorView<CallbackResource> p_resources);
258
259
/*****************/
260
/**** TEXTURE ****/
261
/*****************/
262
263
// In modern APIs, the concept of textures may not exist;
264
// instead there is the image (the memory pretty much,
265
// the view (how the memory is interpreted) and the
266
// sampler (how it's sampled from the shader).
267
//
268
// Texture here includes the first two stages, but
269
// It's possible to create textures sharing the image
270
// but with different views. The main use case for this
271
// is textures that can be read as both SRGB/Linear,
272
// or slices of a texture (a mipmap, a layer, a 3D slice)
273
// for a framebuffer to render into it.
274
275
// Frontend record for a texture (image + view, per the comment above).
struct Texture {
	// Fallback storage used when a shared view cannot alias the original
	// image directly (see _texture_update_shared_fallback and friends).
	struct SharedFallback {
		uint32_t revision = 1;
		RDD::TextureID texture;
		RDG::ResourceTracker *texture_tracker = nullptr;
		RDD::BufferID buffer;
		RDG::ResourceTracker *buffer_tracker = nullptr;
		bool raw_reinterpretation = false;
	};

	RDD::TextureID driver_id; // Driver-level texture handle.

	TextureType type = TEXTURE_TYPE_MAX;
	DataFormat format = DATA_FORMAT_MAX;
	TextureSamples samples = TEXTURE_SAMPLES_MAX;
	TextureSliceType slice_type = TEXTURE_SLICE_MAX; // TEXTURE_SLICE_MAX when this is not a slice view.
	Rect2i slice_rect;
	uint32_t width = 0;
	uint32_t height = 0;
	uint32_t depth = 0;
	uint32_t layers = 0;
	uint32_t mipmaps = 0;
	uint32_t usage_flags = 0;
	uint32_t base_mipmap = 0; // First mipmap of the parent this view covers.
	uint32_t base_layer = 0; // First layer of the parent this view covers.

	// Formats this texture may be reinterpreted as by shared views.
	Vector<DataFormat> allowed_shared_formats;

	bool is_resolve_buffer = false;
	bool is_discardable = false;
	bool has_initial_data = false;

	BitField<RDD::TextureAspectBits> read_aspect_flags = {};
	BitField<RDD::TextureAspectBits> barrier_aspect_flags = {};
	bool bound = false; // Bound to framebuffer.
	RID owner; // Parent texture when this is a shared/slice view.

	RDG::ResourceTracker *draw_tracker = nullptr;
	HashMap<Rect2i, RDG::ResourceTracker *> *slice_trackers = nullptr;
	SharedFallback *shared_fallback = nullptr;
	int32_t transfer_worker_index = -1;
	uint64_t transfer_worker_operation = 0;

	// Subresource range (using the barrier aspect flags) covering the whole
	// view, for use in pipeline barriers.
	RDD::TextureSubresourceRange barrier_range() const {
		RDD::TextureSubresourceRange r;
		r.aspect = barrier_aspect_flags;
		r.base_mipmap = base_mipmap;
		r.mipmap_count = mipmaps;
		r.base_layer = base_layer;
		r.layer_count = layers;
		return r;
	}

	// Reconstructs the public TextureFormat description from this record.
	TextureFormat texture_format() const {
		TextureFormat tf;
		tf.format = format;
		tf.width = width;
		tf.height = height;
		tf.depth = depth;
		tf.array_layers = layers;
		tf.mipmaps = mipmaps;
		tf.texture_type = type;
		tf.samples = samples;
		tf.usage_bits = usage_flags;
		tf.shareable_formats = allowed_shared_formats;
		tf.is_resolve_buffer = is_resolve_buffer;
		tf.is_discardable = is_discardable;
		return tf;
	}
};
345
346
RID_Owner<Texture, true> texture_owner;
347
uint32_t texture_upload_region_size_px = 0;
348
uint32_t texture_download_region_size_px = 0;
349
350
Vector<uint8_t> _texture_get_data(Texture *tex, uint32_t p_layer, bool p_2d = false);
351
uint32_t _texture_layer_count(Texture *p_texture) const;
352
uint32_t _texture_alignment(Texture *p_texture) const;
353
Error _texture_initialize(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, RDD::TextureLayout p_dst_layout, bool p_immediate_flush);
354
void _texture_check_shared_fallback(Texture *p_texture);
355
void _texture_update_shared_fallback(RID p_texture_rid, Texture *p_texture, bool p_for_writing);
356
void _texture_free_shared_fallback(Texture *p_texture);
357
void _texture_copy_shared(RID p_src_texture_rid, Texture *p_src_texture, RID p_dst_texture_rid, Texture *p_dst_texture);
358
void _texture_create_reinterpret_buffer(Texture *p_texture);
359
uint32_t _texture_vrs_method_to_usage_bits() const;
360
361
// Bookkeeping for an asynchronous texture readback started by
// texture_get_data_async(); dimensions are recorded so the callback can
// reconstruct the image once the copy completes.
struct TextureGetDataRequest {
	uint32_t frame_local_index = 0;
	uint32_t frame_local_count = 0;
	Callable callback;
	uint32_t width = 0;
	uint32_t height = 0;
	uint32_t depth = 0;
	uint32_t mipmaps = 0;
	RDD::DataFormat format = RDD::DATA_FORMAT_MAX;
};
371
372
public:
373
struct TextureView {
374
DataFormat format_override = DATA_FORMAT_MAX; // // Means, use same as format.
375
TextureSwizzle swizzle_r = TEXTURE_SWIZZLE_R;
376
TextureSwizzle swizzle_g = TEXTURE_SWIZZLE_G;
377
TextureSwizzle swizzle_b = TEXTURE_SWIZZLE_B;
378
TextureSwizzle swizzle_a = TEXTURE_SWIZZLE_A;
379
380
bool operator==(const TextureView &p_other) const {
381
if (format_override != p_other.format_override) {
382
return false;
383
} else if (swizzle_r != p_other.swizzle_r) {
384
return false;
385
} else if (swizzle_g != p_other.swizzle_g) {
386
return false;
387
} else if (swizzle_b != p_other.swizzle_b) {
388
return false;
389
} else if (swizzle_a != p_other.swizzle_a) {
390
return false;
391
} else {
392
return true;
393
}
394
}
395
};
396
397
RID texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t>> &p_data = Vector<Vector<uint8_t>>());
398
RID texture_create_shared(const TextureView &p_view, RID p_with_texture);
399
RID texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_usage, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers, uint64_t p_mipmaps = 1);
400
RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D, uint32_t p_layers = 0);
401
Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data);
402
Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer); // CPU textures will return immediately, while GPU textures will most likely force a flush
403
Error texture_get_data_async(RID p_texture, uint32_t p_layer, const Callable &p_callback);
404
405
bool texture_is_format_supported_for_usage(DataFormat p_format, BitField<TextureUsageBits> p_usage) const;
406
bool texture_is_shared(RID p_texture);
407
bool texture_is_valid(RID p_texture);
408
TextureFormat texture_get_format(RID p_texture);
409
Size2i texture_size(RID p_texture);
410
#ifndef DISABLE_DEPRECATED
411
uint64_t texture_get_native_handle(RID p_texture);
412
#endif
413
414
Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer);
415
Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers);
416
Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture);
417
418
void texture_set_discardable(RID p_texture, bool p_discardable);
419
bool texture_is_discardable(RID p_texture);
420
421
public:
422
/*************/
423
/**** VRS ****/
424
/*************/
425
426
// Variable-rate-shading mechanism in use; selected at runtime by
// _vrs_detect_method() and queried through vrs_get_method().
enum VRSMethod {
	VRS_METHOD_NONE,
	VRS_METHOD_FRAGMENT_SHADING_RATE,
	VRS_METHOD_FRAGMENT_DENSITY_MAP,
};
431
432
private:
433
VRSMethod vrs_method = VRS_METHOD_NONE;
434
DataFormat vrs_format = DATA_FORMAT_MAX;
435
Size2i vrs_texel_size;
436
437
static RDG::ResourceUsage _vrs_usage_from_method(VRSMethod p_method);
438
static RDD::PipelineStageBits _vrs_stages_from_method(VRSMethod p_method);
439
static RDD::TextureLayout _vrs_layout_from_method(VRSMethod p_method);
440
void _vrs_detect_method();
441
442
public:
443
VRSMethod vrs_get_method() const;
444
DataFormat vrs_get_format() const;
445
Size2i vrs_get_texel_size() const;
446
447
/*********************/
448
/**** FRAMEBUFFER ****/
449
/*********************/
450
451
// In modern APIs, generally, framebuffers work similar to how they
452
// do in OpenGL, with the exception that
453
// the "format" (RDD::RenderPassID) is not dynamic
454
// and must be more or less the same as the one
455
// used for the render pipelines.
456
457
struct AttachmentFormat {
458
enum : uint32_t {
459
UNUSED_ATTACHMENT = 0xFFFFFFFF
460
};
461
DataFormat format;
462
TextureSamples samples;
463
uint32_t usage_flags;
464
AttachmentFormat() {
465
format = DATA_FORMAT_R8G8B8A8_UNORM;
466
samples = TEXTURE_SAMPLES_1;
467
usage_flags = 0;
468
}
469
};
470
471
// Describes one subpass of a multipass framebuffer. Values are indices into
// the framebuffer's attachment list, or ATTACHMENT_UNUSED.
struct FramebufferPass {
	Vector<int32_t> color_attachments;
	Vector<int32_t> input_attachments;
	Vector<int32_t> resolve_attachments;
	Vector<int32_t> preserve_attachments;
	int32_t depth_attachment = ATTACHMENT_UNUSED;
};
478
479
typedef int64_t FramebufferFormatID;
480
481
private:
482
struct FramebufferFormatKey {
483
Vector<AttachmentFormat> attachments;
484
Vector<FramebufferPass> passes;
485
uint32_t view_count = 1;
486
VRSMethod vrs_method = VRS_METHOD_NONE;
487
int32_t vrs_attachment = ATTACHMENT_UNUSED;
488
Size2i vrs_texel_size;
489
490
bool operator<(const FramebufferFormatKey &p_key) const {
491
if (vrs_texel_size != p_key.vrs_texel_size) {
492
return vrs_texel_size < p_key.vrs_texel_size;
493
}
494
495
if (vrs_attachment != p_key.vrs_attachment) {
496
return vrs_attachment < p_key.vrs_attachment;
497
}
498
499
if (vrs_method != p_key.vrs_method) {
500
return vrs_method < p_key.vrs_method;
501
}
502
503
if (view_count != p_key.view_count) {
504
return view_count < p_key.view_count;
505
}
506
507
uint32_t pass_size = passes.size();
508
uint32_t key_pass_size = p_key.passes.size();
509
if (pass_size != key_pass_size) {
510
return pass_size < key_pass_size;
511
}
512
const FramebufferPass *pass_ptr = passes.ptr();
513
const FramebufferPass *key_pass_ptr = p_key.passes.ptr();
514
515
for (uint32_t i = 0; i < pass_size; i++) {
516
{ // Compare color attachments.
517
uint32_t attachment_size = pass_ptr[i].color_attachments.size();
518
uint32_t key_attachment_size = key_pass_ptr[i].color_attachments.size();
519
if (attachment_size != key_attachment_size) {
520
return attachment_size < key_attachment_size;
521
}
522
const int32_t *pass_attachment_ptr = pass_ptr[i].color_attachments.ptr();
523
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].color_attachments.ptr();
524
525
for (uint32_t j = 0; j < attachment_size; j++) {
526
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
527
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
528
}
529
}
530
}
531
{ // Compare input attachments.
532
uint32_t attachment_size = pass_ptr[i].input_attachments.size();
533
uint32_t key_attachment_size = key_pass_ptr[i].input_attachments.size();
534
if (attachment_size != key_attachment_size) {
535
return attachment_size < key_attachment_size;
536
}
537
const int32_t *pass_attachment_ptr = pass_ptr[i].input_attachments.ptr();
538
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].input_attachments.ptr();
539
540
for (uint32_t j = 0; j < attachment_size; j++) {
541
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
542
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
543
}
544
}
545
}
546
{ // Compare resolve attachments.
547
uint32_t attachment_size = pass_ptr[i].resolve_attachments.size();
548
uint32_t key_attachment_size = key_pass_ptr[i].resolve_attachments.size();
549
if (attachment_size != key_attachment_size) {
550
return attachment_size < key_attachment_size;
551
}
552
const int32_t *pass_attachment_ptr = pass_ptr[i].resolve_attachments.ptr();
553
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].resolve_attachments.ptr();
554
555
for (uint32_t j = 0; j < attachment_size; j++) {
556
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
557
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
558
}
559
}
560
}
561
{ // Compare preserve attachments.
562
uint32_t attachment_size = pass_ptr[i].preserve_attachments.size();
563
uint32_t key_attachment_size = key_pass_ptr[i].preserve_attachments.size();
564
if (attachment_size != key_attachment_size) {
565
return attachment_size < key_attachment_size;
566
}
567
const int32_t *pass_attachment_ptr = pass_ptr[i].preserve_attachments.ptr();
568
const int32_t *key_pass_attachment_ptr = key_pass_ptr[i].preserve_attachments.ptr();
569
570
for (uint32_t j = 0; j < attachment_size; j++) {
571
if (pass_attachment_ptr[j] != key_pass_attachment_ptr[j]) {
572
return pass_attachment_ptr[j] < key_pass_attachment_ptr[j];
573
}
574
}
575
}
576
if (pass_ptr[i].depth_attachment != key_pass_ptr[i].depth_attachment) {
577
return pass_ptr[i].depth_attachment < key_pass_ptr[i].depth_attachment;
578
}
579
}
580
581
int as = attachments.size();
582
int bs = p_key.attachments.size();
583
if (as != bs) {
584
return as < bs;
585
}
586
587
const AttachmentFormat *af_a = attachments.ptr();
588
const AttachmentFormat *af_b = p_key.attachments.ptr();
589
for (int i = 0; i < as; i++) {
590
const AttachmentFormat &a = af_a[i];
591
const AttachmentFormat &b = af_b[i];
592
if (a.format != b.format) {
593
return a.format < b.format;
594
}
595
if (a.samples != b.samples) {
596
return a.samples < b.samples;
597
}
598
if (a.usage_flags != b.usage_flags) {
599
return a.usage_flags < b.usage_flags;
600
}
601
}
602
603
return false; // Equal.
604
}
605
};
606
607
static RDD::RenderPassID _render_pass_create(RenderingDeviceDriver *p_driver, const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, uint32_t p_view_count = 1, VRSMethod p_vrs_method = VRS_METHOD_NONE, int32_t p_vrs_attachment = -1, Size2i p_vrs_texel_size = Size2i(), Vector<TextureSamples> *r_samples = nullptr);
608
static RDD::RenderPassID _render_pass_create_from_graph(RenderingDeviceDriver *p_driver, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, void *p_user_data);
609
610
// This is a cache and it's never freed, it ensures
611
// IDs for a given format are always unique.
612
RBMap<FramebufferFormatKey, FramebufferFormatID> framebuffer_format_cache;
613
// Cached framebuffer format: points back at its cache-map entry and keeps a
// compatibility render pass plus per-pass sample counts.
struct FramebufferFormat {
	const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E; // Back-pointer into framebuffer_format_cache.
	RDD::RenderPassID render_pass; // Here for constructing shaders, never used, see section (7.2. Render Pass Compatibility from Vulkan spec).
	Vector<TextureSamples> pass_samples;
	uint32_t view_count = 1; // Number of views.
};
619
620
HashMap<FramebufferFormatID, FramebufferFormat> framebuffer_formats;
621
622
// Frontend record for a framebuffer: its format, attached textures, and an
// optional callback fired when the framebuffer is invalidated.
struct Framebuffer {
	RenderingDevice *rendering_device = nullptr;
	FramebufferFormatID format_id;
	uint32_t storage_mask = 0;
	Vector<RID> texture_ids; // Attachment textures, in attachment order.
	InvalidationCallback invalidated_callback = nullptr;
	void *invalidated_callback_userdata = nullptr;
	RDG::FramebufferCache *framebuffer_cache = nullptr;
	Size2 size;
	uint32_t view_count;
};
633
634
RID_Owner<Framebuffer, true> framebuffer_owner;
635
636
public:
637
// This ID is warranted to be unique for the same formats, does not need to be freed
638
FramebufferFormatID framebuffer_format_create(const Vector<AttachmentFormat> &p_format, uint32_t p_view_count = 1, int32_t p_vrs_attachment = -1);
639
FramebufferFormatID framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count = 1, int32_t p_vrs_attachment = -1);
640
FramebufferFormatID framebuffer_format_create_empty(TextureSamples p_samples = TEXTURE_SAMPLES_1);
641
TextureSamples framebuffer_format_get_texture_samples(FramebufferFormatID p_format, uint32_t p_pass = 0);
642
643
RID framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
644
RID framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
645
RID framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples = TEXTURE_SAMPLES_1, FramebufferFormatID p_format_check = INVALID_ID);
646
bool framebuffer_is_valid(RID p_framebuffer) const;
647
void framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata);
648
649
FramebufferFormatID framebuffer_get_format(RID p_framebuffer);
650
Size2 framebuffer_get_size(RID p_framebuffer);
651
652
/*****************/
653
/**** SAMPLER ****/
654
/*****************/
655
private:
656
RID_Owner<RDD::SamplerID, true> sampler_owner;
657
658
public:
659
RID sampler_create(const SamplerState &p_state);
660
bool sampler_is_format_supported_for_filter(DataFormat p_format, SamplerFilter p_sampler_filter) const;
661
662
/**********************/
663
/**** VERTEX ARRAY ****/
664
/**********************/
665
666
typedef int64_t VertexFormatID;
667
668
private:
669
// Vertex buffers in Vulkan are similar to how
670
// they work in OpenGL, except that instead of
671
// an attribute index, there is a buffer binding
672
// index (for binding the buffers in real-time)
673
// and a location index (what is used in the shader).
674
//
675
// This mapping is done here internally, and it's not
676
// exposed.
677
678
RID_Owner<Buffer, true> vertex_buffer_owner;
679
680
struct VertexDescriptionKey {
681
Vector<VertexAttribute> vertex_formats;
682
683
bool operator==(const VertexDescriptionKey &p_key) const {
684
int vdc = vertex_formats.size();
685
int vdck = p_key.vertex_formats.size();
686
687
if (vdc != vdck) {
688
return false;
689
} else {
690
const VertexAttribute *a_ptr = vertex_formats.ptr();
691
const VertexAttribute *b_ptr = p_key.vertex_formats.ptr();
692
for (int i = 0; i < vdc; i++) {
693
const VertexAttribute &a = a_ptr[i];
694
const VertexAttribute &b = b_ptr[i];
695
696
if (a.location != b.location) {
697
return false;
698
}
699
if (a.offset != b.offset) {
700
return false;
701
}
702
if (a.format != b.format) {
703
return false;
704
}
705
if (a.stride != b.stride) {
706
return false;
707
}
708
if (a.frequency != b.frequency) {
709
return false;
710
}
711
}
712
return true; // They are equal.
713
}
714
}
715
716
uint32_t hash() const {
717
int vdc = vertex_formats.size();
718
uint32_t h = hash_murmur3_one_32(vdc);
719
const VertexAttribute *ptr = vertex_formats.ptr();
720
for (int i = 0; i < vdc; i++) {
721
const VertexAttribute &vd = ptr[i];
722
h = hash_murmur3_one_32(vd.location, h);
723
h = hash_murmur3_one_32(vd.offset, h);
724
h = hash_murmur3_one_32(vd.format, h);
725
h = hash_murmur3_one_32(vd.stride, h);
726
h = hash_murmur3_one_32(vd.frequency, h);
727
}
728
return hash_fmix32(h);
729
}
730
};
731
732
// Hasher adapter so VertexDescriptionKey can be used as a HashMap key
// (delegates to VertexDescriptionKey::hash()).
struct VertexDescriptionHash {
	static _FORCE_INLINE_ uint32_t hash(const VertexDescriptionKey &p_key) {
		return p_key.hash();
	}
};
737
738
// This is a cache and it's never freed, it ensures that
739
// ID used for a specific format always remain the same.
740
HashMap<VertexDescriptionKey, VertexFormatID, VertexDescriptionHash> vertex_format_cache;
741
742
// Cached driver-side vertex format together with the attribute list that
// produced it (value type of the vertex_formats map below).
struct VertexDescriptionCache {
	Vector<VertexAttribute> vertex_formats;
	RDD::VertexFormatID driver_id;
};
746
747
HashMap<VertexFormatID, VertexDescriptionCache> vertex_formats;
748
749
// Frontend record for a vertex array: a vertex format plus the buffers
// bound to each of its binding slots.
struct VertexArray {
	RID buffer;
	VertexFormatID description;
	int vertex_count = 0;
	uint32_t max_instances_allowed = 0; // Presumably derived from per-instance attribute frequencies — confirm in vertex_array_create.

	Vector<RDD::BufferID> buffers; // Not owned, just referenced.
	Vector<RDG::ResourceTracker *> draw_trackers; // Not owned, just referenced.
	Vector<uint64_t> offsets; // Per-buffer byte offsets.
	Vector<int32_t> transfer_worker_indices;
	Vector<uint64_t> transfer_worker_operations;
	HashSet<RID> untracked_buffers;
};
762
763
RID_Owner<VertexArray, true> vertex_array_owner;
764
765
// Buffer specialized for index data; extra fields drive draw-time validation.
struct IndexBuffer : public Buffer {
	uint32_t max_index = 0; // Used for validation.
	uint32_t index_count = 0;
	IndexBufferFormat format = INDEX_BUFFER_FORMAT_UINT16; // 16- or 32-bit indices.
	bool supports_restart_indices = false;
};
771
772
RID_Owner<IndexBuffer, true> index_buffer_owner;
773
774
// A view over a region of an index buffer (created by index_array_create);
// owns nothing itself.
struct IndexArray {
	uint32_t max_index = 0; // Remember the maximum index here too, for validation.
	RDD::BufferID driver_id; // Not owned, inherited from index buffer.
	RDG::ResourceTracker *draw_tracker = nullptr; // Not owned, inherited from index buffer.
	uint32_t offset = 0; // First index of the region.
	uint32_t indices = 0; // Number of indices in the region.
	IndexBufferFormat format = INDEX_BUFFER_FORMAT_UINT16;
	bool supports_restart_indices = false;
	int32_t transfer_worker_index = -1;
	uint64_t transfer_worker_operation = 0;
};
785
786
RID_Owner<IndexArray, true> index_array_owner;
787
788
public:
789
enum BufferCreationBits {
790
BUFFER_CREATION_DEVICE_ADDRESS_BIT = (1 << 0),
791
BUFFER_CREATION_AS_STORAGE_BIT = (1 << 1),
792
};
793
794
enum StorageBufferUsage {
795
STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT = (1 << 0),
796
};
797
798
RID vertex_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data = {}, BitField<BufferCreationBits> p_creation_bits = 0);
799
// Forwarding wrapper that accepts a Vector<uint8_t> (presumably for the
// script bindings — confirm in _bind_methods) and hands it to
// vertex_buffer_create(), which takes a Span.
RID _vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<BufferCreationBits> p_creation_bits = 0) {
	return vertex_buffer_create(p_size_bytes, p_data, p_creation_bits);
}
802
803
// This ID is guaranteed to be unique for the same formats. It does not need to be freed.
804
VertexFormatID vertex_format_create(const Vector<VertexAttribute> &p_vertex_descriptions);
805
RID vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets = Vector<uint64_t>());
806
807
RID index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, Span<uint8_t> p_data = {}, bool p_use_restart_indices = false, BitField<BufferCreationBits> p_creation_bits = 0);
808
// Binding-friendly overload of index_buffer_create() taking a Vector<uint8_t>
// instead of a Span; simply forwards to the public method.
RID _index_buffer_create(uint32_t p_size_indices, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices = false, BitField<BufferCreationBits> p_creation_bits = 0) {
	return index_buffer_create(p_size_indices, p_format, p_data, p_use_restart_indices, p_creation_bits);
}
811
812
RID index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count);
813
814
/****************/
815
/**** SHADER ****/
816
/****************/
817
818
// Some APIs (e.g., Vulkan) specify really complex behavior for the application
// in order to tell when descriptor sets need to be re-bound (or not).
// "When binding a descriptor set (see Descriptor Set Binding) to set
// number N, if the previously bound descriptor sets for sets zero
// through N-1 were all bound using compatible pipeline layouts,
// then performing this binding does not disturb any of the lower numbered sets.
// If, additionally, the previously bound descriptor set for set N was
// bound using a pipeline layout compatible for set N, then the bindings
// in sets numbered greater than N are also not disturbed."
// As a result, we need to figure out quickly when something is no longer "compatible"
// in order to avoid costly rebinds.
829
830
private:
831
struct UniformSetFormat {
832
Vector<ShaderUniform> uniforms;
833
834
_FORCE_INLINE_ bool operator<(const UniformSetFormat &p_other) const {
835
if (uniforms.size() != p_other.uniforms.size()) {
836
return uniforms.size() < p_other.uniforms.size();
837
}
838
for (int i = 0; i < uniforms.size(); i++) {
839
if (uniforms[i] < p_other.uniforms[i]) {
840
return true;
841
} else if (p_other.uniforms[i] < uniforms[i]) {
842
return false;
843
}
844
}
845
return false;
846
}
847
};
848
849
// Always grows, never shrinks, ensuring unique IDs, but we assume
850
// the amount of formats will never be a problem, as the amount of shaders
851
// in a game is limited.
852
RBMap<UniformSetFormat, uint32_t> uniform_set_format_cache;
853
854
// Shaders in Vulkan are just pretty much
855
// precompiled blocks of SPIR-V bytecode. They
856
// are most likely not really compiled to host
857
// assembly until a pipeline is created.
858
//
859
// When supplying the shaders, this implementation
860
// will use the reflection abilities of glslang to
861
// understand and cache everything required to
862
// create and use the descriptor sets (Vulkan's
863
// biggest pain).
864
//
865
// Additionally, hashes are created for every set
866
// to do quick validation and ensuring the user
867
// does not submit something invalid.
868
869
// A compiled shader: the driver-side object plus the reflection data
// (inherited from ShaderReflection) needed to create and validate uniform sets.
struct Shader : public ShaderReflection {
	String name; // Used for debug.
	RDD::ShaderID driver_id;
	uint32_t layout_hash = 0; // Used to quickly compare pipeline layout compatibility.
	BitField<RDD::PipelineStageBits> stage_bits = {};
	Vector<uint32_t> set_formats; // Cached uniform set format IDs, one per set.
};
876
877
String _shader_uniform_debug(RID p_shader, int p_set = -1);
878
879
RID_Owner<Shader, true> shader_owner;
880
881
#ifndef DISABLE_DEPRECATED
882
public:
883
// Deprecated barrier masks, kept only for backward compatibility
// (this whole section is compiled out when DISABLE_DEPRECATED is defined).
enum BarrierMask {
	BARRIER_MASK_VERTEX = 1,
	BARRIER_MASK_FRAGMENT = 8,
	BARRIER_MASK_COMPUTE = 2,
	BARRIER_MASK_TRANSFER = 4,

	BARRIER_MASK_RASTER = BARRIER_MASK_VERTEX | BARRIER_MASK_FRAGMENT, // 9,
	BARRIER_MASK_ALL_BARRIERS = 0x7FFF, // all flags set
	BARRIER_MASK_NO_BARRIER = 0x8000,
};
893
894
// Deprecated initial actions for render pass attachments. Only the first three
// values are distinct; the legacy names below are aliases kept so old code
// keeps compiling.
enum InitialAction {
	INITIAL_ACTION_LOAD,
	INITIAL_ACTION_CLEAR,
	INITIAL_ACTION_DISCARD,
	INITIAL_ACTION_MAX,
	// Legacy aliases.
	INITIAL_ACTION_CLEAR_REGION = INITIAL_ACTION_CLEAR,
	INITIAL_ACTION_CLEAR_REGION_CONTINUE = INITIAL_ACTION_CLEAR,
	INITIAL_ACTION_KEEP = INITIAL_ACTION_LOAD,
	INITIAL_ACTION_DROP = INITIAL_ACTION_DISCARD,
	INITIAL_ACTION_CONTINUE = INITIAL_ACTION_LOAD,
};
905
906
// Deprecated final actions for render pass attachments. Only the first two
// values are distinct; the legacy names below are aliases kept so old code
// keeps compiling.
enum FinalAction {
	FINAL_ACTION_STORE,
	FINAL_ACTION_DISCARD,
	FINAL_ACTION_MAX,
	// Legacy aliases.
	FINAL_ACTION_READ = FINAL_ACTION_STORE,
	FINAL_ACTION_CONTINUE = FINAL_ACTION_STORE,
};
913
914
void barrier(BitField<BarrierMask> p_from = BARRIER_MASK_ALL_BARRIERS, BitField<BarrierMask> p_to = BARRIER_MASK_ALL_BARRIERS);
915
void full_barrier();
916
void draw_command_insert_label(String p_label_name, const Color &p_color = Color(1, 1, 1, 1));
917
Error draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2(), const Vector<RID> &p_storage_textures = Vector<RID>());
918
Error draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids);
919
Vector<int64_t> _draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2(), const TypedArray<RID> &p_storage_textures = TypedArray<RID>());
920
Vector<int64_t> _draw_list_switch_to_next_pass_split(uint32_t p_splits);
921
922
private:
923
void _draw_list_end_bind_compat_81356(BitField<BarrierMask> p_post_barrier);
924
void _compute_list_end_bind_compat_81356(BitField<BarrierMask> p_post_barrier);
925
void _barrier_bind_compat_81356(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to);
926
927
void _draw_list_end_bind_compat_84976(BitField<BarrierMask> p_post_barrier);
928
void _compute_list_end_bind_compat_84976(BitField<BarrierMask> p_post_barrier);
929
InitialAction _convert_initial_action_84976(InitialAction p_old_initial_action);
930
FinalAction _convert_final_action_84976(FinalAction p_old_final_action);
931
DrawListID _draw_list_begin_bind_compat_84976(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const TypedArray<RID> &p_storage_textures);
932
ComputeListID _compute_list_begin_bind_compat_84976(bool p_allow_draw_overlap);
933
Error _buffer_update_bind_compat_84976(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier);
934
Error _buffer_clear_bind_compat_84976(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier);
935
Error _texture_update_bind_compat_84976(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier);
936
Error _texture_copy_bind_compat_84976(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier);
937
Error _texture_clear_bind_compat_84976(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier);
938
Error _texture_resolve_multisample_bind_compat_84976(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier);
939
940
FramebufferFormatID _screen_get_framebuffer_format_bind_compat_87340() const;
941
942
DrawListID _draw_list_begin_bind_compat_90993(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region);
943
944
DrawListID _draw_list_begin_bind_compat_98670(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, uint32_t p_breadcrumb);
945
946
RID _uniform_buffer_create_bind_compat_101561(uint32_t p_size_bytes, const Vector<uint8_t> &p_data);
947
RID _vertex_buffer_create_bind_compat_101561(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, bool p_use_as_storage);
948
RID _index_buffer_create_bind_compat_101561(uint32_t p_size_indices, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices);
949
RID _storage_buffer_create_bind_compat_101561(uint32_t p_size, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage);
950
#endif
951
952
public:
953
RenderingDeviceDriver *get_device_driver() const { return driver; }
954
RenderingContextDriver *get_context_driver() const { return context; }
955
956
const RDD::Capabilities &get_device_capabilities() const { return driver->get_capabilities(); }
957
958
bool has_feature(const Features p_feature) const;
959
960
Vector<uint8_t> shader_compile_spirv_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language = SHADER_LANGUAGE_GLSL, String *r_error = nullptr, bool p_allow_cache = true);
961
Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "");
962
963
RID shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "");
964
RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder = RID());
965
RID shader_create_placeholder();
966
void shader_destroy_modules(RID p_shader);
967
968
uint64_t shader_get_vertex_input_attribute_mask(RID p_shader);
969
970
/******************/
971
/**** UNIFORMS ****/
972
/******************/
973
String get_perf_report() const;
974
975
/*****************/
976
/**** BUFFERS ****/
977
/*****************/
978
979
RID uniform_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data = {}, BitField<BufferCreationBits> p_creation_bits = 0);
980
// Binding-friendly overload of uniform_buffer_create() taking a Vector<uint8_t>
// instead of a Span; simply forwards to the public method.
RID _uniform_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<BufferCreationBits> p_creation_bits = 0) {
	return uniform_buffer_create(p_size_bytes, p_data, p_creation_bits);
}
983
984
RID storage_buffer_create(uint32_t p_size_bytes, Span<uint8_t> p_data = {}, BitField<StorageBufferUsage> p_usage = 0, BitField<BufferCreationBits> p_creation_bits = 0);
985
// Binding-friendly overload of storage_buffer_create() taking a Vector<uint8_t>
// instead of a Span; simply forwards to the public method.
RID _storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage = 0, BitField<BufferCreationBits> p_creation_bits = 0) {
	return storage_buffer_create(p_size_bytes, p_data, p_usage, p_creation_bits);
}
988
989
RID texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, Span<uint8_t> p_data = {});
990
// Binding-friendly overload of texture_buffer_create() taking a Vector<uint8_t>
// instead of a Span; simply forwards to the public method.
RID _texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
	return texture_buffer_create(p_size_elements, p_format, p_data);
}
993
994
// Describes a single uniform binding: its type, binding index, and the
// resource ID(s) attached to it. Storage for the IDs uses a small-object
// optimization: a single inline RID for the common one-ID case, falling back
// to a Vector<RID> only when more than one ID is attached.
struct Uniform {
	UniformType uniform_type = UNIFORM_TYPE_IMAGE;
	uint32_t binding = 0; // Binding index as specified in shader.
	// This flag specifies that this is an immutable sampler to be set when creating pipeline layout.
	bool immutable_sampler = false;

private:
	// In most cases only one ID is provided per binding, so avoid allocating memory unnecessarily for performance.
	RID id; // If only one is provided, this is used.
	Vector<RID> ids; // If multiple ones are provided, this is used instead.

public:
	// Number of resource IDs attached (1 when the inline slot is in use).
	_FORCE_INLINE_ uint32_t get_id_count() const {
		return (id.is_valid() ? 1 : ids.size());
	}

	// Returns the ID at p_idx. When the inline slot is in use, only index 0 is
	// valid; other indices fail and return an invalid RID.
	_FORCE_INLINE_ RID get_id(uint32_t p_idx) const {
		if (id.is_valid()) {
			ERR_FAIL_COND_V(p_idx != 0, RID());
			return id;
		} else {
			return ids[p_idx];
		}
	}
	// Replaces the ID at p_idx. When the inline slot is in use, only index 0 is
	// valid.
	// NOTE(review): if no ID has been appended yet (inline slot invalid and
	// `ids` empty), any p_idx indexes an empty vector — callers appear expected
	// to append before setting; confirm with call sites.
	_FORCE_INLINE_ void set_id(uint32_t p_idx, RID p_id) {
		if (id.is_valid()) {
			ERR_FAIL_COND(p_idx != 0);
			id = p_id;
		} else {
			ids.write[p_idx] = p_id;
		}
	}

	// Appends an ID. The first append fills the inline slot; the second append
	// migrates both IDs into the vector and invalidates the inline slot.
	_FORCE_INLINE_ void append_id(RID p_id) {
		if (ids.is_empty()) {
			if (id == RID()) {
				id = p_id;
			} else {
				ids.push_back(id);
				ids.push_back(p_id);
				id = RID();
			}
		} else {
			ids.push_back(p_id);
		}
	}

	// Removes all attached IDs (both the inline slot and the vector).
	_FORCE_INLINE_ void clear_ids() {
		id = RID();
		ids.clear();
	}

	// Convenience constructor for the common single-ID case.
	_FORCE_INLINE_ Uniform(UniformType p_type, int p_binding, RID p_id) {
		uniform_type = p_type;
		binding = p_binding;
		id = p_id;
	}
	// Convenience constructor for the multiple-ID case.
	_FORCE_INLINE_ Uniform(UniformType p_type, int p_binding, const Vector<RID> &p_ids) {
		uniform_type = p_type;
		binding = p_binding;
		ids = p_ids;
	}
	_FORCE_INLINE_ Uniform() = default;
};
1058
1059
typedef Uniform PipelineImmutableSampler;
1060
RID shader_create_from_bytecode_with_samplers(const Vector<uint8_t> &p_shader_binary, RID p_placeholder = RID(), const Vector<PipelineImmutableSampler> &p_immutable_samplers = Vector<PipelineImmutableSampler>());
1061
1062
private:
1063
static const uint32_t MAX_UNIFORM_SETS = 16;
1064
static const uint32_t MAX_PUSH_CONSTANT_SIZE = 128;
1065
1066
// This structure contains the descriptor set. They _need_ to be allocated
1067
// for a shader (and will be erased when this shader is erased), but should
1068
// work for other shaders as long as the hash matches. This covers using
1069
// them in shader variants.
1070
//
1071
// Keep also in mind that you can share buffers between descriptor sets, so
1072
// the above restriction is not too serious.
1073
1074
// A baked uniform (descriptor) set, along with the tracking and validation
// data needed when binding it.
struct UniformSet {
	uint32_t format = 0; // Cached uniform set format ID.
	RID shader_id; // Shader this set was baked against.
	uint32_t shader_set = 0; // Set index within that shader.
	RDD::UniformSetID driver_id;
	// A texture attached at a specific binding, kept for validation.
	struct AttachableTexture {
		uint32_t bind = 0;
		RID texture;
	};

	// A texture shared with other resources that must be updated before use.
	struct SharedTexture {
		uint32_t writing = 0;
		RID texture;
	};

	LocalVector<AttachableTexture> attachable_textures; // Used for validation.
	Vector<RDG::ResourceTracker *> draw_trackers;
	Vector<RDG::ResourceUsage> draw_trackers_usage; // Parallel to `draw_trackers`.
	HashMap<RID, RDG::ResourceUsage> untracked_usage; // Usage for resources without trackers.
	LocalVector<SharedTexture> shared_textures_to_update;
	// Called when the set becomes invalid (e.g. a contained resource is freed).
	InvalidationCallback invalidated_callback = nullptr;
	void *invalidated_callback_userdata = nullptr;
};
1097
1098
RID_Owner<UniformSet, true> uniform_set_owner;
1099
1100
void _uniform_set_update_shared(UniformSet *p_uniform_set);
1101
1102
public:
1103
/** Bake a set of uniforms that can be bound at runtime with the given shader.
1104
* @remark Setting p_linear_pool = true while keeping the RID around for longer than the current frame will result in undefined behavior.
1105
* @param p_uniforms The uniforms to bake into a set.
1106
* @param p_shader The shader you intend to bind these uniforms with.
1107
* @param p_shader_set The set index. Should be in range [0; 4)
1108
* The value 4 comes from physical_device_properties.limits.maxBoundDescriptorSets. Vulkan only guarantees maxBoundDescriptorSets >= 4 (== 4 is very common on Mobile).
1109
* @param p_linear_pool If you call this function every frame (and free the returned RID within the same frame!), set it to true for better performance.
1110
* If you plan on keeping the return value around for more than one frame (e.g. Sets that are created once and reused forever) you MUST set it to false.
1111
* @return Baked descriptor set.
1112
*/
1113
RID uniform_set_create(const VectorView<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set, bool p_linear_pool = false);
1114
bool uniform_set_is_valid(RID p_uniform_set);
1115
void uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata);
1116
1117
bool uniform_sets_have_linear_pools() const;
1118
1119
/*******************/
1120
/**** PIPELINES ****/
1121
/*******************/
1122
1123
// Render pipeline contains ALL the
1124
// information required for drawing.
1125
// This includes all the rasterizer state
1126
// as well as shader used, framebuffer format,
1127
// etc.
1128
// While the pipeline is just a single object
1129
// (VkPipeline) a lot of values are also saved
1130
// here to do validation (vulkan does none by
1131
// default) and warn the user if something
1132
// was not supplied as intended.
1133
private:
1134
// A compiled render pipeline, plus the values cached from creation time that
// are needed to validate draw calls against it.
struct RenderPipeline {
	// Cached values for validation.
#ifdef DEBUG_ENABLED
	struct Validation {
		FramebufferFormatID framebuffer_format;
		uint32_t render_pass = 0;
		uint32_t dynamic_state = 0;
		VertexFormatID vertex_format;
		bool uses_restart_indices = false;
		uint32_t primitive_minimum = 0;
		uint32_t primitive_divisor = 0;
	} validation;
#endif
	// Actual pipeline.
	RID shader;
	RDD::ShaderID shader_driver_id;
	uint32_t shader_layout_hash = 0; // Used for quick set-compatibility checks when binding.
	Vector<uint32_t> set_formats;
	RDD::PipelineID driver_id;
	BitField<RDD::PipelineStageBits> stage_bits = {};
	uint32_t push_constant_size = 0;
};
1156
1157
RID_Owner<RenderPipeline, true> render_pipeline_owner;
1158
1159
bool pipeline_cache_enabled = false;
1160
size_t pipeline_cache_size = 0;
1161
String pipeline_cache_file_path;
1162
WorkerThreadPool::TaskID pipeline_cache_save_task = WorkerThreadPool::INVALID_TASK_ID;
1163
1164
Vector<uint8_t> _load_pipeline_cache();
1165
void _update_pipeline_cache(bool p_closing = false);
1166
static void _save_pipeline_cache(void *p_data);
1167
1168
// A compiled compute pipeline, plus the values cached from creation time that
// are needed to validate dispatches against it.
struct ComputePipeline {
	RID shader;
	RDD::ShaderID shader_driver_id;
	uint32_t shader_layout_hash = 0; // Used for quick set-compatibility checks when binding.
	Vector<uint32_t> set_formats;
	RDD::PipelineID driver_id;
	uint32_t push_constant_size = 0;
	uint32_t local_group_size[3] = { 0, 0, 0 }; // x, y, z workgroup sizes, used by dispatch_threads.
};
1177
1178
RID_Owner<ComputePipeline, true> compute_pipeline_owner;
1179
1180
public:
1181
RID render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags = 0, uint32_t p_for_render_pass = 0, const Vector<PipelineSpecializationConstant> &p_specialization_constants = Vector<PipelineSpecializationConstant>());
1182
bool render_pipeline_is_valid(RID p_pipeline);
1183
1184
RID compute_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants = Vector<PipelineSpecializationConstant>());
1185
bool compute_pipeline_is_valid(RID p_pipeline);
1186
1187
private:
1188
/****************/
1189
/**** SCREEN ****/
1190
/****************/
1191
HashMap<DisplayServer::WindowID, RDD::SwapChainID> screen_swap_chains;
1192
HashMap<DisplayServer::WindowID, RDD::FramebufferID> screen_framebuffers;
1193
1194
uint32_t _get_swap_chain_desired_count() const;
1195
1196
public:
1197
Error screen_create(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID);
1198
Error screen_prepare_for_drawing(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID);
1199
int screen_get_width(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1200
int screen_get_height(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1201
int screen_get_pre_rotation_degrees(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1202
FramebufferFormatID screen_get_framebuffer_format(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID) const;
1203
Error screen_free(DisplayServer::WindowID p_screen = DisplayServer::MAIN_WINDOW_ID);
1204
1205
/*************************/
1206
/**** DRAW LISTS (II) ****/
1207
/*************************/
1208
1209
private:
1210
// Draw list contains both the command buffer
1211
// used for drawing as well as a LOT of
1212
// information used for validation. This
1213
// validation is cheap so most of it can
1214
// also run in release builds.
1215
1216
// State for the active draw list. Most members exist for validation of the
// commands recorded into it (see comment above: this validation is cheap, so
// part of it also runs in release builds).
struct DrawList {
	Rect2i viewport;
	bool active = false;

	// Per-set binding state, compared against the pipeline's expected formats.
	struct SetState {
		uint32_t pipeline_expected_format = 0;
		uint32_t uniform_set_format = 0;
		RDD::UniformSetID uniform_set_driver_id;
		RID uniform_set;
		bool bound = false;
	};

	// Currently bound pipeline/vertex/index/set state.
	struct State {
		SetState sets[MAX_UNIFORM_SETS];
		uint32_t set_count = 0;
		RID pipeline;
		RID pipeline_shader;
		RDD::ShaderID pipeline_shader_driver_id;
		uint32_t pipeline_shader_layout_hash = 0;
		uint32_t pipeline_push_constant_size = 0;
		RID vertex_array;
		RID index_array;
		uint32_t draw_count = 0;
	} state;

#ifdef DEBUG_ENABLED
	// Full validation state (debug builds only).
	struct Validation {
		// Actual render pass values.
		uint32_t dynamic_state = 0;
		VertexFormatID vertex_format = INVALID_ID;
		uint32_t vertex_array_size = 0;
		uint32_t vertex_max_instances_allowed = 0xFFFFFFFF;
		bool index_buffer_uses_restart_indices = false;
		uint32_t index_array_count = 0;
		uint32_t index_array_max_index = 0;
		Vector<uint32_t> set_formats;
		Vector<bool> set_bound;
		Vector<RID> set_rids;
		// Last pipeline set values.
		bool pipeline_active = false;
		uint32_t pipeline_dynamic_state = 0;
		VertexFormatID pipeline_vertex_format = INVALID_ID;
		RID pipeline_shader;
		bool pipeline_uses_restart_indices = false;
		uint32_t pipeline_primitive_divisor = 0;
		uint32_t pipeline_primitive_minimum = 0;
		uint32_t pipeline_push_constant_size = 0;
		bool pipeline_push_constant_supplied = false;
	} validation;
#else
	// Minimal validation state kept in release builds.
	struct Validation {
		uint32_t vertex_array_size = 0;
		uint32_t index_array_count = 0;
	} validation;
#endif
};
1272
1273
DrawList draw_list;
1274
uint32_t draw_list_subpass_count = 0;
1275
#ifdef DEBUG_ENABLED
1276
FramebufferFormatID draw_list_framebuffer_format = INVALID_ID;
1277
#endif
1278
uint32_t draw_list_current_subpass = 0;
1279
1280
LocalVector<RID> draw_list_bound_textures;
1281
1282
void _draw_list_start(const Rect2i &p_viewport);
1283
void _draw_list_end(Rect2i *r_last_viewport = nullptr);
1284
1285
public:
1286
// Flags controlling how draw_list_begin() treats each attachment:
// bits 0-7 clear color attachments 0-7, bits 8-15 ignore (discard) them,
// and dedicated bits handle depth and stencil.
enum DrawFlags {
	DRAW_DEFAULT_ALL = 0,
	DRAW_CLEAR_COLOR_0 = (1 << 0),
	DRAW_CLEAR_COLOR_1 = (1 << 1),
	DRAW_CLEAR_COLOR_2 = (1 << 2),
	DRAW_CLEAR_COLOR_3 = (1 << 3),
	DRAW_CLEAR_COLOR_4 = (1 << 4),
	DRAW_CLEAR_COLOR_5 = (1 << 5),
	DRAW_CLEAR_COLOR_6 = (1 << 6),
	DRAW_CLEAR_COLOR_7 = (1 << 7),
	DRAW_CLEAR_COLOR_MASK = 0xFF,
	DRAW_CLEAR_COLOR_ALL = DRAW_CLEAR_COLOR_MASK,
	DRAW_IGNORE_COLOR_0 = (1 << 8),
	DRAW_IGNORE_COLOR_1 = (1 << 9),
	DRAW_IGNORE_COLOR_2 = (1 << 10),
	DRAW_IGNORE_COLOR_3 = (1 << 11),
	DRAW_IGNORE_COLOR_4 = (1 << 12),
	DRAW_IGNORE_COLOR_5 = (1 << 13),
	DRAW_IGNORE_COLOR_6 = (1 << 14),
	DRAW_IGNORE_COLOR_7 = (1 << 15),
	DRAW_IGNORE_COLOR_MASK = 0xFF00,
	DRAW_IGNORE_COLOR_ALL = DRAW_IGNORE_COLOR_MASK,
	DRAW_CLEAR_DEPTH = (1 << 16),
	DRAW_IGNORE_DEPTH = (1 << 17),
	DRAW_CLEAR_STENCIL = (1 << 18),
	DRAW_IGNORE_STENCIL = (1 << 19),
	DRAW_CLEAR_ALL = DRAW_CLEAR_COLOR_ALL | DRAW_CLEAR_DEPTH | DRAW_CLEAR_STENCIL,
	DRAW_IGNORE_ALL = DRAW_IGNORE_COLOR_ALL | DRAW_IGNORE_DEPTH | DRAW_IGNORE_STENCIL
};
1315
1316
DrawListID draw_list_begin_for_screen(DisplayServer::WindowID p_screen = 0, const Color &p_clear_color = Color());
1317
DrawListID draw_list_begin(RID p_framebuffer, BitField<DrawFlags> p_draw_flags = DRAW_DEFAULT_ALL, VectorView<Color> p_clear_color_values = VectorView<Color>(), float p_clear_depth_value = 1.0f, uint32_t p_clear_stencil_value = 0, const Rect2 &p_region = Rect2(), uint32_t p_breadcrumb = 0);
1318
DrawListID _draw_list_begin_bind(RID p_framebuffer, BitField<DrawFlags> p_draw_flags = DRAW_DEFAULT_ALL, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth_value = 1.0f, uint32_t p_clear_stencil_value = 0, const Rect2 &p_region = Rect2(), uint32_t p_breadcrumb = 0);
1319
1320
void draw_list_set_blend_constants(DrawListID p_list, const Color &p_color);
1321
void draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline);
1322
void draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index);
1323
void draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array);
1324
void draw_list_bind_index_array(DrawListID p_list, RID p_index_array);
1325
void draw_list_set_line_width(DrawListID p_list, float p_width);
1326
void draw_list_set_push_constant(DrawListID p_list, const void *p_data, uint32_t p_data_size);
1327
1328
void draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances = 1, uint32_t p_procedural_vertices = 0);
1329
void draw_list_draw_indirect(DrawListID p_list, bool p_use_indices, RID p_buffer, uint32_t p_offset = 0, uint32_t p_draw_count = 1, uint32_t p_stride = 0);
1330
1331
void draw_list_set_viewport(DrawListID p_list, const Rect2 &p_rect);
1332
void draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect);
1333
void draw_list_disable_scissor(DrawListID p_list);
1334
1335
uint32_t draw_list_get_current_pass();
1336
DrawListID draw_list_switch_to_next_pass();
1337
1338
void draw_list_end();
1339
1340
private:
1341
/***********************/
1342
/**** COMPUTE LISTS ****/
1343
/***********************/
1344
1345
// State for the active compute list; mirrors DrawList but for dispatches.
struct ComputeList {
	bool active = false;
	// Per-set binding state, compared against the pipeline's expected formats.
	struct SetState {
		uint32_t pipeline_expected_format = 0;
		uint32_t uniform_set_format = 0;
		RDD::UniformSetID uniform_set_driver_id;
		RID uniform_set;
		bool bound = false;
	};

	// Currently bound pipeline/set/push-constant state.
	struct State {
		SetState sets[MAX_UNIFORM_SETS];
		uint32_t set_count = 0;
		RID pipeline;
		RID pipeline_shader;
		RDD::ShaderID pipeline_shader_driver_id;
		uint32_t pipeline_shader_layout_hash = 0;
		uint32_t local_group_size[3] = { 0, 0, 0 };
		uint8_t push_constant_data[MAX_PUSH_CONSTANT_SIZE] = {};
		uint32_t push_constant_size = 0;
		uint32_t dispatch_count = 0;
	} state;

#ifdef DEBUG_ENABLED
	// Validation state (debug builds only).
	struct Validation {
		Vector<uint32_t> set_formats;
		Vector<bool> set_bound;
		Vector<RID> set_rids;
		// Last pipeline set values.
		bool pipeline_active = false;
		RID pipeline_shader;
		uint32_t invalid_set_from = 0;
		uint32_t pipeline_push_constant_size = 0;
		bool pipeline_push_constant_supplied = false;
	} validation;
#endif
};
1382
1383
ComputeList compute_list;
1384
ComputeList::State compute_list_barrier_state;
1385
1386
public:
1387
ComputeListID compute_list_begin();
1388
void compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline);
1389
void compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index);
1390
void compute_list_set_push_constant(ComputeListID p_list, const void *p_data, uint32_t p_data_size);
1391
void compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups);
1392
void compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads);
1393
void compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset);
1394
void compute_list_add_barrier(ComputeListID p_list);
1395
1396
void compute_list_end();
1397
1398
private:
1399
/*************************/
1400
/**** TRANSFER WORKER ****/
1401
/*************************/
1402
1403
// One worker in the transfer pool: holds the staging buffer, command
// buffer/pool/fence and the bookkeeping used to track which transfer
// operations have been submitted and processed.
struct TransferWorker {
	uint32_t index = 0; // Index of this worker within the pool.
	RDD::BufferID staging_buffer;
	uint32_t max_transfer_size = 0;
	uint32_t staging_buffer_size_in_use = 0;
	uint32_t staging_buffer_size_allocated = 0;
	RDD::CommandBufferID command_buffer;
	RDD::CommandPoolID command_pool;
	RDD::FenceID command_fence; // Signaled when the worker's submission completes.
	LocalVector<RDD::TextureBarrier> texture_barriers;
	bool recording = false; // Command buffer currently open for recording.
	bool submitted = false; // Command buffer submitted, awaiting the fence.
	BinaryMutex thread_mutex;
	// Monotonic operation counters used to check whether a given operation has
	// been processed yet (see _check_transfer_worker_operation()).
	uint64_t operations_processed = 0;
	uint64_t operations_submitted = 0;
	uint64_t operations_counter = 0;
	BinaryMutex operations_mutex; // Guards the operation counters above.
};
1421
1422
LocalVector<TransferWorker *> transfer_worker_pool;
1423
uint32_t transfer_worker_pool_max_size = 1;
1424
LocalVector<uint64_t> transfer_worker_operation_used_by_draw;
1425
LocalVector<uint32_t> transfer_worker_pool_available_list;
1426
LocalVector<RDD::TextureBarrier> transfer_worker_pool_texture_barriers;
1427
BinaryMutex transfer_worker_pool_mutex;
1428
BinaryMutex transfer_worker_pool_texture_barriers_mutex;
1429
ConditionVariable transfer_worker_pool_condition;
1430
1431
TransferWorker *_acquire_transfer_worker(uint32_t p_transfer_size, uint32_t p_required_align, uint32_t &r_staging_offset);
1432
void _release_transfer_worker(TransferWorker *p_transfer_worker);
1433
void _end_transfer_worker(TransferWorker *p_transfer_worker);
1434
void _submit_transfer_worker(TransferWorker *p_transfer_worker, VectorView<RDD::SemaphoreID> p_signal_semaphores = VectorView<RDD::SemaphoreID>());
1435
void _wait_for_transfer_worker(TransferWorker *p_transfer_worker);
1436
void _flush_barriers_for_transfer_worker(TransferWorker *p_transfer_worker);
1437
void _check_transfer_worker_operation(uint32_t p_transfer_worker_index, uint64_t p_transfer_worker_operation);
1438
void _check_transfer_worker_buffer(Buffer *p_buffer);
1439
void _check_transfer_worker_texture(Texture *p_texture);
1440
void _check_transfer_worker_vertex_array(VertexArray *p_vertex_array);
1441
void _check_transfer_worker_index_array(IndexArray *p_index_array);
1442
void _submit_transfer_workers(RDD::CommandBufferID p_draw_command_buffer = RDD::CommandBufferID());
1443
void _submit_transfer_barriers(RDD::CommandBufferID p_draw_command_buffer);
1444
void _wait_for_transfer_workers();
1445
void _free_transfer_workers();
1446
1447
/***********************/
1448
/**** COMMAND GRAPH ****/
1449
/***********************/
1450
1451
bool _texture_make_mutable(Texture *p_texture, RID p_texture_id);
1452
bool _buffer_make_mutable(Buffer *p_buffer, RID p_buffer_id);
1453
bool _vertex_array_make_mutable(VertexArray *p_vertex_array, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker);
1454
bool _index_array_make_mutable(IndexArray *p_index_array, RDG::ResourceTracker *p_resource_tracker);
1455
bool _uniform_set_make_mutable(UniformSet *p_uniform_set, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker);
1456
bool _dependency_make_mutable(RID p_id, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker);
1457
bool _dependencies_make_mutable_recursive(RID p_id, RDG::ResourceTracker *p_resource_tracker);
1458
bool _dependencies_make_mutable(RID p_id, RDG::ResourceTracker *p_resource_tracker);
1459
1460
RenderingDeviceGraph draw_graph;
1461
1462
/**************************/
1463
/**** QUEUE MANAGEMENT ****/
1464
/**************************/
1465
1466
RDD::CommandQueueFamilyID main_queue_family;
1467
RDD::CommandQueueFamilyID transfer_queue_family;
1468
RDD::CommandQueueFamilyID present_queue_family;
1469
RDD::CommandQueueID main_queue;
1470
RDD::CommandQueueID transfer_queue;
1471
RDD::CommandQueueID present_queue;
1472
1473
/**************************/
/**** FRAME MANAGEMENT ****/
/**************************/

// This is the frame structure. There are normally
// 3 of these (used for triple buffering), or 2
// (double buffering). They are cycled constantly.
//
// It contains two command buffers, one that is
// used internally for setting up (creating stuff)
// and another used mostly for drawing.
//
// They also contain a list of things that need
// to be disposed of when deleted, which can't
// happen immediately due to the asynchronous
// nature of the GPU. They will get deleted
// when the frame is cycled.
struct Frame {
1492
// List in usage order, from last to free to first to free.
1493
List<Buffer> buffers_to_dispose_of;
1494
List<Texture> textures_to_dispose_of;
1495
List<Framebuffer> framebuffers_to_dispose_of;
1496
List<RDD::SamplerID> samplers_to_dispose_of;
1497
List<Shader> shaders_to_dispose_of;
1498
List<UniformSet> uniform_sets_to_dispose_of;
1499
List<RenderPipeline> render_pipelines_to_dispose_of;
1500
List<ComputePipeline> compute_pipelines_to_dispose_of;
1501
1502
// Pending asynchronous data transfer for buffers.
1503
LocalVector<RDD::BufferID> download_buffer_staging_buffers;
1504
LocalVector<RDD::BufferCopyRegion> download_buffer_copy_regions;
1505
LocalVector<BufferGetDataRequest> download_buffer_get_data_requests;
1506
1507
// Pending asynchronous data transfer for textures.
1508
LocalVector<RDD::BufferID> download_texture_staging_buffers;
1509
LocalVector<RDD::BufferTextureCopyRegion> download_buffer_texture_copy_regions;
1510
LocalVector<uint32_t> download_texture_mipmap_offsets;
1511
LocalVector<TextureGetDataRequest> download_texture_get_data_requests;
1512
1513
// The command pool used by the command buffer.
1514
RDD::CommandPoolID command_pool;
1515
1516
// The command buffer used by the main thread when recording the frame.
1517
RDD::CommandBufferID command_buffer;
1518
1519
// Signaled by the command buffer submission. Present must wait on this semaphore.
1520
RDD::SemaphoreID semaphore;
1521
1522
// Signaled by the command buffer submission. Must wait on this fence before beginning command recording for the frame.
1523
RDD::FenceID fence;
1524
bool fence_signaled = false;
1525
1526
// Semaphores the frame must wait on before executing the command buffer.
1527
LocalVector<RDD::SemaphoreID> semaphores_to_wait_on;
1528
1529
// Swap chains prepared for drawing during the frame that must be presented.
1530
LocalVector<RDD::SwapChainID> swap_chains_to_present;
1531
1532
// Semaphores the transfer workers can use to wait before rendering the frame.
1533
// This must have the same size of the transfer worker pool.
1534
TightLocalVector<RDD::SemaphoreID> transfer_worker_semaphores;
1535
1536
// Extra command buffer pool used for driver workarounds or to reduce GPU bubbles by
1537
// splitting the final render pass to the swapchain into its own cmd buffer.
1538
RDG::CommandBufferPool command_buffer_pool;
1539
1540
struct Timestamp {
1541
String description;
1542
uint64_t value = 0;
1543
};
1544
1545
RDD::QueryPoolID timestamp_pool;
1546
1547
TightLocalVector<String> timestamp_names;
1548
TightLocalVector<uint64_t> timestamp_cpu_values;
1549
uint32_t timestamp_count = 0;
1550
TightLocalVector<String> timestamp_result_names;
1551
TightLocalVector<uint64_t> timestamp_cpu_result_values;
1552
TightLocalVector<uint64_t> timestamp_result_values;
1553
uint32_t timestamp_result_count = 0;
1554
uint64_t index = 0;
1555
};
1556
1557
uint32_t max_timestamp_query_elements = 0;
1558
1559
int frame = 0;
1560
TightLocalVector<Frame> frames;
1561
uint64_t frames_drawn = 0;
1562
1563
// Whenever logic/physics request a graphics operation (not just deleting a resource) that requires
1564
// us to flush all graphics commands, we must set frames_pending_resources_for_processing = frames.size().
1565
// This is important for when the user requested for the logic loop to still be updated while
1566
// graphics should not (e.g. headless Multiplayer servers, minimized windows that need to still
1567
// process something on the background).
1568
uint32_t frames_pending_resources_for_processing = 0u;
1569
1570
public:
1571
bool has_pending_resources_for_processing() const { return frames_pending_resources_for_processing != 0u; }
1572
1573
private:
1574
void _free_pending_resources(int p_frame);
1575
1576
uint64_t texture_memory = 0;
1577
uint64_t buffer_memory = 0;
1578
1579
protected:
1580
void execute_chained_cmds(bool p_present_swap_chain,
1581
RenderingDeviceDriver::FenceID p_draw_fence,
1582
RenderingDeviceDriver::SemaphoreID p_dst_draw_semaphore_to_signal);
1583
1584
public:
1585
void _free_internal(RID p_id);
1586
void _begin_frame(bool p_presented = false);
1587
void _end_frame();
1588
void _execute_frame(bool p_present);
1589
void _stall_for_frame(uint32_t p_frame);
1590
void _stall_for_previous_frames();
1591
void _flush_and_stall_for_all_frames();
1592
1593
template <typename T>
1594
void _free_rids(T &p_owner, const char *p_type);
1595
1596
#ifdef DEV_ENABLED
1597
HashMap<RID, String> resource_names;
1598
#endif
1599
1600
public:
1601
Error initialize(RenderingContextDriver *p_context, DisplayServer::WindowID p_main_window = DisplayServer::INVALID_WINDOW_ID);
1602
void finalize();
1603
1604
void _set_max_fps(int p_max_fps);
1605
1606
void free(RID p_id);
1607
1608
/****************/
1609
/**** Timing ****/
1610
/****************/
1611
1612
void capture_timestamp(const String &p_name);
1613
uint32_t get_captured_timestamps_count() const;
1614
uint64_t get_captured_timestamps_frame() const;
1615
uint64_t get_captured_timestamp_gpu_time(uint32_t p_index) const;
1616
uint64_t get_captured_timestamp_cpu_time(uint32_t p_index) const;
1617
String get_captured_timestamp_name(uint32_t p_index) const;
1618
1619
/****************/
1620
/**** LIMITS ****/
1621
/****************/
1622
1623
uint64_t limit_get(Limit p_limit) const;
1624
1625
void swap_buffers(bool p_present);
1626
1627
uint32_t get_frame_delay() const;
1628
1629
void submit();
1630
void sync();
1631
1632
// Categories reported by get_memory_usage().
enum MemoryType {
	MEMORY_TEXTURES,
	MEMORY_BUFFERS,
	MEMORY_TOTAL
};
1638
uint64_t get_memory_usage(MemoryType p_type) const;
1639
1640
RenderingDevice *create_local_device();
1641
1642
void set_resource_name(RID p_id, const String &p_name);
1643
1644
void _draw_command_begin_label(String p_label_name, const Color &p_color = Color(1, 1, 1, 1));
1645
void draw_command_begin_label(const Span<char> p_label_name, const Color &p_color = Color(1, 1, 1, 1));
1646
void draw_command_end_label();
1647
1648
String get_device_vendor_name() const;
1649
String get_device_name() const;
1650
DeviceType get_device_type() const;
1651
String get_device_api_name() const;
1652
String get_device_api_version() const;
1653
String get_device_pipeline_cache_uuid() const;
1654
1655
bool is_composite_alpha_supported() const;
1656
1657
uint64_t get_driver_resource(DriverResource p_resource, RID p_rid = RID(), uint64_t p_index = 0);
1658
1659
String get_driver_and_device_memory_report() const;
1660
1661
String get_tracked_object_name(uint32_t p_type_index) const;
1662
uint64_t get_tracked_object_type_count() const;
1663
1664
uint64_t get_driver_total_memory() const;
1665
uint64_t get_driver_allocation_count() const;
1666
uint64_t get_driver_memory_by_object_type(uint32_t p_type) const;
1667
uint64_t get_driver_allocs_by_object_type(uint32_t p_type) const;
1668
1669
uint64_t get_device_total_memory() const;
1670
uint64_t get_device_allocation_count() const;
1671
uint64_t get_device_memory_by_object_type(uint32_t p_type) const;
1672
uint64_t get_device_allocs_by_object_type(uint32_t p_type) const;
1673
1674
static RenderingDevice *get_singleton();
1675
1676
void make_current();
1677
1678
RenderingDevice();
1679
~RenderingDevice();
1680
1681
private:
1682
/*****************/
1683
/**** BINDERS ****/
1684
/*****************/
1685
1686
RID _texture_create(const Ref<RDTextureFormat> &p_format, const Ref<RDTextureView> &p_view, const TypedArray<PackedByteArray> &p_data = Array());
1687
RID _texture_create_shared(const Ref<RDTextureView> &p_view, RID p_with_texture);
1688
RID _texture_create_shared_from_slice(const Ref<RDTextureView> &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D);
1689
Ref<RDTextureFormat> _texture_get_format(RID p_rd_texture);
1690
1691
FramebufferFormatID _framebuffer_format_create(const TypedArray<RDAttachmentFormat> &p_attachments, uint32_t p_view_count);
1692
FramebufferFormatID _framebuffer_format_create_multipass(const TypedArray<RDAttachmentFormat> &p_attachments, const TypedArray<RDFramebufferPass> &p_passes, uint32_t p_view_count);
1693
RID _framebuffer_create(const TypedArray<RID> &p_textures, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
1694
RID _framebuffer_create_multipass(const TypedArray<RID> &p_textures, const TypedArray<RDFramebufferPass> &p_passes, FramebufferFormatID p_format_check = INVALID_ID, uint32_t p_view_count = 1);
1695
1696
RID _sampler_create(const Ref<RDSamplerState> &p_state);
1697
1698
VertexFormatID _vertex_format_create(const TypedArray<RDVertexAttribute> &p_vertex_formats);
1699
RID _vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const TypedArray<RID> &p_src_buffers, const Vector<int64_t> &p_offsets = Vector<int64_t>());
1700
1701
Ref<RDShaderSPIRV> _shader_compile_spirv_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache = true);
1702
Vector<uint8_t> _shader_compile_binary_from_spirv(const Ref<RDShaderSPIRV> &p_bytecode, const String &p_shader_name = "");
1703
RID _shader_create_from_spirv(const Ref<RDShaderSPIRV> &p_spirv, const String &p_shader_name = "");
1704
1705
RID _uniform_set_create(const TypedArray<RDUniform> &p_uniforms, RID p_shader, uint32_t p_shader_set);
1706
1707
Error _buffer_update_bind(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data);
1708
1709
RID _render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const Ref<RDPipelineRasterizationState> &p_rasterization_state, const Ref<RDPipelineMultisampleState> &p_multisample_state, const Ref<RDPipelineDepthStencilState> &p_depth_stencil_state, const Ref<RDPipelineColorBlendState> &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
1710
RID _compute_pipeline_create(RID p_shader, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
1711
1712
void _draw_list_set_push_constant(DrawListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size);
1713
void _compute_list_set_push_constant(ComputeListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size);
1714
};
1715
1716
VARIANT_ENUM_CAST(RenderingDevice::DeviceType)
1717
VARIANT_ENUM_CAST(RenderingDevice::DriverResource)
1718
VARIANT_ENUM_CAST(RenderingDevice::ShaderStage)
1719
VARIANT_ENUM_CAST(RenderingDevice::ShaderLanguage)
1720
VARIANT_ENUM_CAST(RenderingDevice::CompareOperator)
1721
VARIANT_ENUM_CAST(RenderingDevice::DataFormat)
1722
VARIANT_ENUM_CAST(RenderingDevice::TextureType)
1723
VARIANT_ENUM_CAST(RenderingDevice::TextureSamples)
1724
VARIANT_BITFIELD_CAST(RenderingDevice::TextureUsageBits)
1725
VARIANT_ENUM_CAST(RenderingDevice::TextureSwizzle)
1726
VARIANT_ENUM_CAST(RenderingDevice::TextureSliceType)
1727
VARIANT_ENUM_CAST(RenderingDevice::SamplerFilter)
1728
VARIANT_ENUM_CAST(RenderingDevice::SamplerRepeatMode)
1729
VARIANT_ENUM_CAST(RenderingDevice::SamplerBorderColor)
1730
VARIANT_ENUM_CAST(RenderingDevice::VertexFrequency)
1731
VARIANT_ENUM_CAST(RenderingDevice::IndexBufferFormat)
1732
VARIANT_BITFIELD_CAST(RenderingDevice::StorageBufferUsage)
1733
VARIANT_BITFIELD_CAST(RenderingDevice::BufferCreationBits)
1734
VARIANT_ENUM_CAST(RenderingDevice::UniformType)
1735
VARIANT_ENUM_CAST(RenderingDevice::RenderPrimitive)
1736
VARIANT_ENUM_CAST(RenderingDevice::PolygonCullMode)
1737
VARIANT_ENUM_CAST(RenderingDevice::PolygonFrontFace)
1738
VARIANT_ENUM_CAST(RenderingDevice::StencilOperation)
1739
VARIANT_ENUM_CAST(RenderingDevice::LogicOperation)
1740
VARIANT_ENUM_CAST(RenderingDevice::BlendFactor)
1741
VARIANT_ENUM_CAST(RenderingDevice::BlendOperation)
1742
VARIANT_BITFIELD_CAST(RenderingDevice::PipelineDynamicStateFlags)
1743
VARIANT_ENUM_CAST(RenderingDevice::PipelineSpecializationConstantType)
1744
VARIANT_ENUM_CAST(RenderingDevice::Limit)
1745
VARIANT_ENUM_CAST(RenderingDevice::MemoryType)
1746
VARIANT_ENUM_CAST(RenderingDevice::Features)
1747
VARIANT_ENUM_CAST(RenderingDevice::BreadcrumbMarker)
1748
VARIANT_BITFIELD_CAST(RenderingDevice::DrawFlags);
1749
1750
#ifndef DISABLE_DEPRECATED
1751
VARIANT_BITFIELD_CAST(RenderingDevice::BarrierMask);
1752
VARIANT_ENUM_CAST(RenderingDevice::InitialAction)
1753
VARIANT_ENUM_CAST(RenderingDevice::FinalAction)
1754
#endif
1755
1756
typedef RenderingDevice RD;
1757
1758