Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/servers/rendering/rendering_shader_container.cpp
10277 views
1
/**************************************************************************/
2
/* rendering_shader_container.cpp */
3
/**************************************************************************/
4
/* This file is part of: */
5
/* GODOT ENGINE */
6
/* https://godotengine.org */
7
/**************************************************************************/
8
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
9
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
10
/* */
11
/* Permission is hereby granted, free of charge, to any person obtaining */
12
/* a copy of this software and associated documentation files (the */
13
/* "Software"), to deal in the Software without restriction, including */
14
/* without limitation the rights to use, copy, modify, merge, publish, */
15
/* distribute, sublicense, and/or sell copies of the Software, and to */
16
/* permit persons to whom the Software is furnished to do so, subject to */
17
/* the following conditions: */
18
/* */
19
/* The above copyright notice and this permission notice shall be */
20
/* included in all copies or substantial portions of the Software. */
21
/* */
22
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
23
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
24
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
25
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
26
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
27
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
28
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
29
/**************************************************************************/
30
31
#include "rendering_shader_container.h"
32
33
#include "core/io/compression.h"
34
35
// Rounds p_size up to the next multiple of p_alignment.
// p_alignment must be non-zero; sizes already on the boundary pass through unchanged.
static inline uint32_t aligned_to(uint32_t p_size, uint32_t p_alignment) {
	const uint32_t remainder = p_size % p_alignment;
	return (remainder == 0) ? p_size : p_size + (p_alignment - remainder);
}
42
43
// Hook for reading format-specific data placed after the container header.
// Returns the number of extra bytes consumed; the base container stores none.
uint32_t RenderingShaderContainer::_from_bytes_header_extra_data(const uint8_t *p_bytes) {
	return 0;
}
46
47
// Hook for reading format-specific data placed after the reflection block.
// Returns the number of extra bytes consumed; the base container stores none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_extra_data(const uint8_t *p_bytes) {
	return 0;
}
50
51
// Hook called once per uniform set, before that set's uniforms are read.
// Returns the number of extra bytes consumed; the base container stores none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_binding_uniform_extra_data_start(const uint8_t *p_bytes) {
	return 0;
}
54
55
// Hook called after each uniform binding entry is read; p_index is the global
// (flattened) uniform index. Returns extra bytes consumed; the base stores none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_binding_uniform_extra_data(const uint8_t *p_bytes, uint32_t p_index) {
	return 0;
}
58
59
// Hook called once before the specialization constants are read.
// Returns the number of extra bytes consumed; the base container stores none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_specialization_extra_data_start(const uint8_t *p_bytes) {
	return 0;
}
62
63
// Hook called after each specialization constant entry is read; p_index is the
// constant's index. Returns extra bytes consumed; the base stores none.
uint32_t RenderingShaderContainer::_from_bytes_reflection_specialization_extra_data(const uint8_t *p_bytes, uint32_t p_index) {
	return 0;
}
66
67
// Hook called once before the shader stage list is read (see from_bytes()).
// Returns the number of extra bytes consumed; the base container stores none.
uint32_t RenderingShaderContainer::_from_bytes_shader_extra_data_start(const uint8_t *p_bytes) {
	return 0;
}
70
71
// Hook called after each shader's compressed code is read; p_index is the
// shader's index. Returns extra bytes consumed; the base stores none.
uint32_t RenderingShaderContainer::_from_bytes_shader_extra_data(const uint8_t *p_bytes, uint32_t p_index) {
	return 0;
}
74
75
// Hook called at the very end of deserialization, after all shaders are read.
// Returns the number of extra bytes consumed; the base container stores none.
uint32_t RenderingShaderContainer::_from_bytes_footer_extra_data(const uint8_t *p_bytes) {
	return 0;
}
78
79
// Writes format-specific data after the container header. Called with nullptr
// during the sizing pass of to_bytes() to report the required size only.
// Returns the number of extra bytes written; the base container writes none.
uint32_t RenderingShaderContainer::_to_bytes_header_extra_data(uint8_t *) const {
	return 0;
}
82
83
// Writes format-specific data after the reflection block. Called with nullptr
// during the sizing pass of to_bytes() to report the required size only.
// Returns the number of extra bytes written; the base container writes none.
uint32_t RenderingShaderContainer::_to_bytes_reflection_extra_data(uint8_t *) const {
	return 0;
}
86
87
// Writes format-specific data after each uniform binding entry; the index is the
// global (flattened) uniform index. Called with nullptr during the sizing pass.
// Returns the number of extra bytes written; the base container writes none.
uint32_t RenderingShaderContainer::_to_bytes_reflection_binding_uniform_extra_data(uint8_t *, uint32_t) const {
	return 0;
}
90
91
// Writes format-specific data after each specialization constant entry.
// Called with nullptr during the sizing pass to report the required size only.
// Returns the number of extra bytes written; the base container writes none.
uint32_t RenderingShaderContainer::_to_bytes_reflection_specialization_extra_data(uint8_t *, uint32_t) const {
	return 0;
}
94
95
// Writes format-specific data after each shader's compressed code; the index is
// the shader's index. Called with nullptr during the sizing pass.
// Returns the number of extra bytes written; the base container writes none.
uint32_t RenderingShaderContainer::_to_bytes_shader_extra_data(uint8_t *, uint32_t) const {
	return 0;
}
98
99
// Writes format-specific data at the very end of the container. Called with
// nullptr during the sizing pass to report the required size only.
// Returns the number of extra bytes written; the base container writes none.
uint32_t RenderingShaderContainer::_to_bytes_footer_extra_data(uint8_t *) const {
	return 0;
}
102
103
// Hook invoked at the end of set_from_shader_reflection() so derived containers
// can react to the freshly-set reflection state. The base implementation is
// intentionally empty.
void RenderingShaderContainer::_set_from_shader_reflection_post(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection) {
	// Do nothing.
}
106
107
// Captures a shader's reflection into the container's serializable state:
// scalar fields go into reflection_data, uniforms are flattened across sets,
// and the stage bitmask is expanded into an ordered stage list.
void RenderingShaderContainer::set_from_shader_reflection(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection) {
	// Drop any state left over from a previously stored shader.
	reflection_shader_stages.clear();
	reflection_specialization_data.clear();
	reflection_binding_set_uniforms_data.clear();
	reflection_binding_set_uniforms_count.clear();

	shader_name = p_shader_name.utf8();

	// Mirror the scalar reflection fields into the serializable structure.
	reflection_data.shader_name_len = shader_name.length();
	reflection_data.vertex_input_mask = p_reflection.vertex_input_mask;
	reflection_data.fragment_output_mask = p_reflection.fragment_output_mask;
	reflection_data.is_compute = p_reflection.is_compute;
	reflection_data.has_multiview = p_reflection.has_multiview;
	reflection_data.compute_local_size[0] = p_reflection.compute_local_size[0];
	reflection_data.compute_local_size[1] = p_reflection.compute_local_size[1];
	reflection_data.compute_local_size[2] = p_reflection.compute_local_size[2];
	reflection_data.push_constant_size = p_reflection.push_constant_size;
	reflection_data.push_constant_stages_mask = uint32_t(p_reflection.push_constant_stages);
	reflection_data.set_count = p_reflection.uniform_sets.size();
	reflection_data.specialization_constants_count = p_reflection.specialization_constants.size();

	// Flatten every uniform of every set into one array; per-set sizes are
	// tracked separately so the sets can be rebuilt later.
	ReflectionBindingData uniform_entry;
	for (const Vector<RenderingDeviceCommons::ShaderUniform> &set_uniforms : p_reflection.uniform_sets) {
		for (const RenderingDeviceCommons::ShaderUniform &shader_uniform : set_uniforms) {
			uniform_entry.type = uint32_t(shader_uniform.type);
			uniform_entry.binding = shader_uniform.binding;
			uniform_entry.stages = uint32_t(shader_uniform.stages);
			uniform_entry.length = shader_uniform.length;
			uniform_entry.writable = uint32_t(shader_uniform.writable);
			reflection_binding_set_uniforms_data.push_back(uniform_entry);
		}

		reflection_binding_set_uniforms_count.push_back(set_uniforms.size());
	}

	// Store the specialization constants in serializable form.
	ReflectionSpecializationData constant_entry;
	for (const RenderingDeviceCommons::ShaderSpecializationConstant &constant : p_reflection.specialization_constants) {
		constant_entry.type = uint32_t(constant.type);
		constant_entry.constant_id = constant.constant_id;
		constant_entry.int_value = constant.int_value;
		constant_entry.stage_flags = uint32_t(constant.stages);
		reflection_specialization_data.push_back(constant_entry);
	}

	// Expand the stage bitmask into an ordered list of stage indices.
	for (uint32_t stage = 0; stage < RenderingDeviceCommons::SHADER_STAGE_MAX; stage++) {
		if (p_reflection.stages_bits.has_flag(RenderingDeviceCommons::ShaderStage(1U << stage))) {
			reflection_shader_stages.push_back(RenderingDeviceCommons::ShaderStage(stage));
		}
	}

	reflection_data.stage_count = reflection_shader_stages.size();

	// Let derived containers react to the new reflection state.
	_set_from_shader_reflection_post(p_shader_name, p_reflection);
}
161
162
// Stores the given SPIR-V stage data by delegating to the format-specific
// _set_code_from_spirv() implementation. Returns whether that succeeded.
bool RenderingShaderContainer::set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) {
	return _set_code_from_spirv(p_spirv);
}
165
166
// Reconstructs a ShaderReflection structure from the container's serialized
// reflection state (the inverse of set_from_shader_reflection()).
// Note: the redundant second resize() calls on specialization_constants and
// stages_vector present in the original were removed; all three containers are
// sized once, up front.
RenderingDeviceCommons::ShaderReflection RenderingShaderContainer::get_shader_reflection() const {
	RenderingDeviceCommons::ShaderReflection shader_refl;
	shader_refl.push_constant_size = reflection_data.push_constant_size;
	shader_refl.push_constant_stages = reflection_data.push_constant_stages_mask;
	shader_refl.vertex_input_mask = reflection_data.vertex_input_mask;
	shader_refl.fragment_output_mask = reflection_data.fragment_output_mask;
	shader_refl.is_compute = reflection_data.is_compute;
	shader_refl.has_multiview = reflection_data.has_multiview;
	shader_refl.compute_local_size[0] = reflection_data.compute_local_size[0];
	shader_refl.compute_local_size[1] = reflection_data.compute_local_size[1];
	shader_refl.compute_local_size[2] = reflection_data.compute_local_size[2];
	shader_refl.uniform_sets.resize(reflection_data.set_count);
	shader_refl.specialization_constants.resize(reflection_data.specialization_constants_count);
	shader_refl.stages_vector.resize(reflection_data.stage_count);

	DEV_ASSERT(reflection_binding_set_uniforms_count.size() == reflection_data.set_count && "The amount of elements in the reflection and the shader container can't be different.");

	// Unflatten the uniform array back into per-set vectors, using the stored
	// per-set counts to know where each set's uniforms end.
	uint32_t uniform_index = 0;
	for (uint32_t i = 0; i < reflection_data.set_count; i++) {
		Vector<RenderingDeviceCommons::ShaderUniform> &uniform_set = shader_refl.uniform_sets.ptrw()[i];
		uint32_t uniforms_count = reflection_binding_set_uniforms_count[i];
		uniform_set.resize(uniforms_count);
		for (uint32_t j = 0; j < uniforms_count; j++) {
			const ReflectionBindingData &binding = reflection_binding_set_uniforms_data[uniform_index++];
			RenderingDeviceCommons::ShaderUniform &uniform = uniform_set.ptrw()[j];
			uniform.type = RenderingDeviceCommons::UniformType(binding.type);
			uniform.writable = binding.writable;
			uniform.length = binding.length;
			uniform.binding = binding.binding;
			uniform.stages = binding.stages;
		}
	}

	// Restore the specialization constants.
	for (uint32_t i = 0; i < reflection_data.specialization_constants_count; i++) {
		const ReflectionSpecializationData &spec = reflection_specialization_data[i];
		RenderingDeviceCommons::ShaderSpecializationConstant &sc = shader_refl.specialization_constants.ptrw()[i];
		sc.type = RenderingDeviceCommons::PipelineSpecializationConstantType(spec.type);
		sc.constant_id = spec.constant_id;
		sc.int_value = spec.int_value;
		sc.stages = spec.stage_flags;
	}

	// Rebuild both the ordered stage list and the stage bitmask from the
	// serialized stage indices.
	for (uint32_t i = 0; i < reflection_data.stage_count; i++) {
		shader_refl.stages_vector.set(i, reflection_shader_stages[i]);
		shader_refl.stages_bits.set_flag(RenderingDeviceCommons::ShaderStage(1U << reflection_shader_stages[i]));
	}

	return shader_refl;
}
216
217
// Deserializes the container from p_bytes, validating the header and every
// section boundary along the way. Returns false (with an error message) on a
// magic/format/version mismatch, on truncated input, or if the total bytes
// read do not match the array size exactly.
// NOTE(review): the _from_bytes_*_extra_data() hooks advance the offset by
// their return value without a bounds check here — they are trusted to stay
// within p_bytes; the final size check catches mismatches after the fact.
bool RenderingShaderContainer::from_bytes(const PackedByteArray &p_bytes) {
	// All variable-sized sections (shader name, compressed code) are padded to
	// 4-byte boundaries when written, so reads re-align with aligned_to().
	const uint64_t alignment = sizeof(uint32_t);
	const uint8_t *bytes_ptr = p_bytes.ptr();
	uint64_t bytes_offset = 0;

	// Read container header.
	ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ContainerHeader)) > p_bytes.size(), false, "Not enough bytes for a container header in shader container.");
	const ContainerHeader &container_header = *(const ContainerHeader *)(&bytes_ptr[bytes_offset]);
	bytes_offset += sizeof(ContainerHeader);
	bytes_offset += _from_bytes_header_extra_data(&bytes_ptr[bytes_offset]);

	ERR_FAIL_COND_V_MSG(container_header.magic_number != CONTAINER_MAGIC_NUMBER, false, "Incorrect magic number in shader container.");
	ERR_FAIL_COND_V_MSG(container_header.version > CONTAINER_VERSION, false, "Unsupported version in shader container.");
	ERR_FAIL_COND_V_MSG(container_header.format != _format(), false, "Incorrect format in shader container.");
	ERR_FAIL_COND_V_MSG(container_header.format_version > _format_version(), false, "Unsupported format version in shader container.");

	// Adjust shaders to the size indicated by the container header.
	shaders.resize(container_header.shader_count);

	// Read reflection data.
	ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ReflectionData)) > p_bytes.size(), false, "Not enough bytes for reflection data in shader container.");
	reflection_data = *(const ReflectionData *)(&bytes_ptr[bytes_offset]);
	bytes_offset += sizeof(ReflectionData);
	bytes_offset += _from_bytes_reflection_extra_data(&bytes_ptr[bytes_offset]);

	// Read shader name. The name is stored unpadded; the offset is re-aligned
	// to a 4-byte boundary after it.
	ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + reflection_data.shader_name_len) > p_bytes.size(), false, "Not enough bytes for shader name in shader container.");
	if (reflection_data.shader_name_len > 0) {
		String shader_name_str;
		shader_name_str.append_utf8((const char *)(&bytes_ptr[bytes_offset]), reflection_data.shader_name_len);
		shader_name = shader_name_str.utf8();
		bytes_offset = aligned_to(bytes_offset + reflection_data.shader_name_len, alignment);
	} else {
		shader_name = CharString();
	}

	// Read the flattened uniform bindings: for each set, a uint32_t count
	// followed by that many ReflectionBindingData entries.
	reflection_binding_set_uniforms_count.resize(reflection_data.set_count);
	reflection_binding_set_uniforms_data.clear();

	uint32_t uniform_index = 0;
	for (uint32_t i = 0; i < reflection_data.set_count; i++) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(uint32_t)) > p_bytes.size(), false, "Not enough bytes for uniform set count in shader container.");
		uint32_t uniforms_count = *(uint32_t *)(&bytes_ptr[bytes_offset]);
		reflection_binding_set_uniforms_count.ptrw()[i] = uniforms_count;
		bytes_offset += sizeof(uint32_t);

		reflection_binding_set_uniforms_data.resize(reflection_binding_set_uniforms_data.size() + uniforms_count);
		bytes_offset += _from_bytes_reflection_binding_uniform_extra_data_start(&bytes_ptr[bytes_offset]);

		for (uint32_t j = 0; j < uniforms_count; j++) {
			ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ReflectionBindingData)) > p_bytes.size(), false, "Not enough bytes for uniform in shader container.");
			memcpy(&reflection_binding_set_uniforms_data.ptrw()[uniform_index], &bytes_ptr[bytes_offset], sizeof(ReflectionBindingData));
			bytes_offset += sizeof(ReflectionBindingData);
			bytes_offset += _from_bytes_reflection_binding_uniform_extra_data(&bytes_ptr[bytes_offset], uniform_index);
			uniform_index++;
		}
	}

	// Read the specialization constants.
	reflection_specialization_data.resize(reflection_data.specialization_constants_count);
	bytes_offset += _from_bytes_reflection_specialization_extra_data_start(&bytes_ptr[bytes_offset]);

	for (uint32_t i = 0; i < reflection_data.specialization_constants_count; i++) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ReflectionSpecializationData)) > p_bytes.size(), false, "Not enough bytes for specialization in shader container.");
		memcpy(&reflection_specialization_data.ptrw()[i], &bytes_ptr[bytes_offset], sizeof(ReflectionSpecializationData));
		bytes_offset += sizeof(ReflectionSpecializationData);
		bytes_offset += _from_bytes_reflection_specialization_extra_data(&bytes_ptr[bytes_offset], i);
	}

	// Read the shader stage list as one contiguous copy.
	const uint32_t stage_count = reflection_data.stage_count;
	if (stage_count > 0) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + stage_count * sizeof(RenderingDeviceCommons::ShaderStage)) > p_bytes.size(), false, "Not enough bytes for stages in shader container.");
		reflection_shader_stages.resize(stage_count);
		bytes_offset += _from_bytes_shader_extra_data_start(&bytes_ptr[bytes_offset]);
		memcpy(reflection_shader_stages.ptrw(), &bytes_ptr[bytes_offset], stage_count * sizeof(RenderingDeviceCommons::ShaderStage));
		bytes_offset += stage_count * sizeof(RenderingDeviceCommons::ShaderStage);
	}

	// Read shaders.
	for (int64_t i = 0; i < shaders.size(); i++) {
		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + sizeof(ShaderHeader)) > p_bytes.size(), false, "Not enough bytes for shader header in shader container.");
		const ShaderHeader &header = *(const ShaderHeader *)(&bytes_ptr[bytes_offset]);
		bytes_offset += sizeof(ShaderHeader);

		ERR_FAIL_COND_V_MSG(int64_t(bytes_offset + header.code_compressed_size) > p_bytes.size(), false, "Not enough bytes for a shader in shader container.");
		Shader &shader = shaders.ptrw()[i];
		shader.shader_stage = RenderingDeviceCommons::ShaderStage(header.shader_stage);
		shader.code_compression_flags = header.code_compression_flags;
		shader.code_decompressed_size = header.code_decompressed_size;
		shader.code_compressed_bytes.resize(header.code_compressed_size);
		memcpy(shader.code_compressed_bytes.ptrw(), &bytes_ptr[bytes_offset], header.code_compressed_size);
		// Compressed code is padded to the alignment boundary.
		bytes_offset = aligned_to(bytes_offset + header.code_compressed_size, alignment);
		bytes_offset += _from_bytes_shader_extra_data(&bytes_ptr[bytes_offset], i);
	}

	bytes_offset += _from_bytes_footer_extra_data(&bytes_ptr[bytes_offset]);

	// The container must be consumed exactly; trailing or missing bytes mean
	// the data and this reader disagree about the layout.
	ERR_FAIL_COND_V_MSG(bytes_offset != (uint64_t)p_bytes.size(), false, "Amount of bytes in the container does not match the amount of bytes read.");
	return true;
}
316
317
// Serializes the container into a byte array using a two-pass scheme: first
// compute the exact total size (calling the _to_bytes_*_extra_data() hooks
// with nullptr so they only report sizes), then write everything out and
// verify the written size matches the reservation.
// Fix: the shader-writing loop previously obtained its destination through
// bytes.ptr() (a C-style cast discarding const on the read-only pointer) and a
// second bytes.ptrw() call; it now uses the same writable bytes_ptr as the
// rest of the function.
PackedByteArray RenderingShaderContainer::to_bytes() const {
	// Compute the exact size the container will require for writing everything out.
	const uint64_t alignment = sizeof(uint32_t);
	uint64_t total_size = 0;
	total_size += sizeof(ContainerHeader) + _to_bytes_header_extra_data(nullptr);
	total_size += sizeof(ReflectionData) + _to_bytes_reflection_extra_data(nullptr);
	total_size += aligned_to(reflection_data.shader_name_len, alignment);
	total_size += reflection_binding_set_uniforms_count.size() * sizeof(uint32_t);
	total_size += reflection_binding_set_uniforms_data.size() * sizeof(ReflectionBindingData);
	total_size += reflection_specialization_data.size() * sizeof(ReflectionSpecializationData);
	total_size += reflection_shader_stages.size() * sizeof(RenderingDeviceCommons::ShaderStage);

	for (uint32_t i = 0; i < reflection_binding_set_uniforms_data.size(); i++) {
		total_size += _to_bytes_reflection_binding_uniform_extra_data(nullptr, i);
	}

	for (uint32_t i = 0; i < reflection_specialization_data.size(); i++) {
		total_size += _to_bytes_reflection_specialization_extra_data(nullptr, i);
	}

	for (uint32_t i = 0; i < shaders.size(); i++) {
		total_size += sizeof(ShaderHeader);
		total_size += shaders[i].code_compressed_bytes.size();
		// Compressed code is padded to the alignment boundary.
		total_size = aligned_to(total_size, alignment);
		total_size += _to_bytes_shader_extra_data(nullptr, i);
	}

	total_size += _to_bytes_footer_extra_data(nullptr);

	// Create the array that will hold all of the data.
	PackedByteArray bytes;
	bytes.resize_initialized(total_size);

	// Write out the data to the array.
	uint64_t bytes_offset = 0;
	uint8_t *bytes_ptr = bytes.ptrw();
	ContainerHeader &container_header = *(ContainerHeader *)(&bytes_ptr[bytes_offset]);
	container_header.magic_number = CONTAINER_MAGIC_NUMBER;
	container_header.version = CONTAINER_VERSION;
	container_header.format = _format();
	container_header.format_version = _format_version();
	container_header.shader_count = shaders.size();
	bytes_offset += sizeof(ContainerHeader);
	bytes_offset += _to_bytes_header_extra_data(&bytes_ptr[bytes_offset]);

	memcpy(&bytes_ptr[bytes_offset], &reflection_data, sizeof(ReflectionData));
	bytes_offset += sizeof(ReflectionData);
	bytes_offset += _to_bytes_reflection_extra_data(&bytes_ptr[bytes_offset]);

	// Shader name is stored unpadded and the offset re-aligned afterwards.
	if (shader_name.size() > 0) {
		memcpy(&bytes_ptr[bytes_offset], shader_name.ptr(), reflection_data.shader_name_len);
		bytes_offset = aligned_to(bytes_offset + reflection_data.shader_name_len, alignment);
	}

	// For each set: a uint32_t count followed by the set's uniform entries.
	uint32_t uniform_index = 0;
	for (uint32_t uniform_count : reflection_binding_set_uniforms_count) {
		memcpy(&bytes_ptr[bytes_offset], &uniform_count, sizeof(uniform_count));
		bytes_offset += sizeof(uint32_t);

		for (uint32_t i = 0; i < uniform_count; i++) {
			memcpy(&bytes_ptr[bytes_offset], &reflection_binding_set_uniforms_data[uniform_index], sizeof(ReflectionBindingData));
			bytes_offset += sizeof(ReflectionBindingData);
			bytes_offset += _to_bytes_reflection_binding_uniform_extra_data(&bytes_ptr[bytes_offset], uniform_index);
			uniform_index++;
		}
	}

	for (uint32_t i = 0; i < reflection_specialization_data.size(); i++) {
		memcpy(&bytes_ptr[bytes_offset], &reflection_specialization_data.ptr()[i], sizeof(ReflectionSpecializationData));
		bytes_offset += sizeof(ReflectionSpecializationData);
		bytes_offset += _to_bytes_reflection_specialization_extra_data(&bytes_ptr[bytes_offset], i);
	}

	// The stage list is written as one contiguous copy.
	if (!reflection_shader_stages.is_empty()) {
		uint32_t stage_count = reflection_shader_stages.size();
		memcpy(&bytes_ptr[bytes_offset], reflection_shader_stages.ptr(), stage_count * sizeof(RenderingDeviceCommons::ShaderStage));
		bytes_offset += stage_count * sizeof(RenderingDeviceCommons::ShaderStage);
	}

	for (uint32_t i = 0; i < shaders.size(); i++) {
		const Shader &shader = shaders[i];
		ShaderHeader &header = *(ShaderHeader *)(&bytes_ptr[bytes_offset]);
		header.shader_stage = shader.shader_stage;
		header.code_compressed_size = uint32_t(shader.code_compressed_bytes.size());
		header.code_compression_flags = shader.code_compression_flags;
		header.code_decompressed_size = shader.code_decompressed_size;
		bytes_offset += sizeof(ShaderHeader);
		memcpy(&bytes_ptr[bytes_offset], shader.code_compressed_bytes.ptr(), shader.code_compressed_bytes.size());
		bytes_offset = aligned_to(bytes_offset + shader.code_compressed_bytes.size(), alignment);
		bytes_offset += _to_bytes_shader_extra_data(&bytes_ptr[bytes_offset], i);
	}

	bytes_offset += _to_bytes_footer_extra_data(&bytes_ptr[bytes_offset]);

	// Both passes must agree exactly, otherwise the reader will mis-parse.
	ERR_FAIL_COND_V_MSG(bytes_offset != total_size, PackedByteArray(), "Amount of bytes written does not match the amount of bytes reserved for the container.");
	return bytes;
}
414
415
bool RenderingShaderContainer::compress_code(const uint8_t *p_decompressed_bytes, uint32_t p_decompressed_size, uint8_t *p_compressed_bytes, uint32_t *r_compressed_size, uint32_t *r_compressed_flags) const {
416
DEV_ASSERT(p_decompressed_bytes != nullptr);
417
DEV_ASSERT(p_decompressed_size > 0);
418
DEV_ASSERT(p_compressed_bytes != nullptr);
419
DEV_ASSERT(r_compressed_size != nullptr);
420
DEV_ASSERT(r_compressed_flags != nullptr);
421
422
*r_compressed_flags = 0;
423
424
PackedByteArray zstd_bytes;
425
const int64_t zstd_max_bytes = Compression::get_max_compressed_buffer_size(p_decompressed_size, Compression::MODE_ZSTD);
426
zstd_bytes.resize(zstd_max_bytes);
427
428
const int64_t zstd_size = Compression::compress(zstd_bytes.ptrw(), p_decompressed_bytes, p_decompressed_size, Compression::MODE_ZSTD);
429
if (zstd_size > 0 && (uint32_t)(zstd_size) < p_decompressed_size) {
430
// Only choose Zstd if it results in actual compression.
431
memcpy(p_compressed_bytes, zstd_bytes.ptr(), zstd_size);
432
*r_compressed_size = zstd_size;
433
*r_compressed_flags |= COMPRESSION_FLAG_ZSTD;
434
} else {
435
// Just copy the input to the output directly.
436
memcpy(p_compressed_bytes, p_decompressed_bytes, p_decompressed_size);
437
*r_compressed_size = p_decompressed_size;
438
}
439
440
return true;
441
}
442
443
bool RenderingShaderContainer::decompress_code(const uint8_t *p_compressed_bytes, uint32_t p_compressed_size, uint32_t p_compressed_flags, uint8_t *p_decompressed_bytes, uint32_t p_decompressed_size) const {
444
DEV_ASSERT(p_compressed_bytes != nullptr);
445
DEV_ASSERT(p_compressed_size > 0);
446
DEV_ASSERT(p_decompressed_bytes != nullptr);
447
DEV_ASSERT(p_decompressed_size > 0);
448
449
bool uses_zstd = p_compressed_flags & COMPRESSION_FLAG_ZSTD;
450
if (uses_zstd) {
451
if (!Compression::decompress(p_decompressed_bytes, p_decompressed_size, p_compressed_bytes, p_compressed_size, Compression::MODE_ZSTD)) {
452
ERR_FAIL_V_MSG(false, "Malformed zstd input for decompressing shader code.");
453
}
454
} else {
455
memcpy(p_decompressed_bytes, p_compressed_bytes, MIN(p_compressed_size, p_decompressed_size));
456
}
457
458
return true;
459
}
460
461
// Nothing to initialize beyond the members' in-class defaults.
RenderingShaderContainer::RenderingShaderContainer() {}
462
463
// No manual cleanup required; all members release themselves.
RenderingShaderContainer::~RenderingShaderContainer() {}
464
465