GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2015 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// Copyright (C) 2017, 2019 ARM Limited.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#include "ParseHelper.h"
#include "Initialize.h"
#include "Scan.h"

#include "../OSDependent/osinclude.h"
#include <algorithm>

#include "preprocessor/PpContext.h"

extern int yyparse(glslang::TParseContext*);

namespace glslang {

TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins,
                             int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
                             TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
                             const TString* entryPoint) :
            TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language,
                              infoSink, forwardCompatible, messages, entryPoint),
            inMain(false),
            blockName(nullptr),
            limits(resources.limits),
            atomicUintOffsets(nullptr), anyIndexLimits(false)
{
    // decide whether precision qualifiers should be ignored or respected
    if (isEsProfile() || spvVersion.vulkan > 0) {
        precisionManager.respectPrecisionQualifiers();
        if (! parsingBuiltins && language == EShLangFragment && !isEsProfile() && spvVersion.vulkan > 0)
            precisionManager.warnAboutDefaults();
    }

    setPrecisionDefaults();

    globalUniformDefaults.clear();
    globalUniformDefaults.layoutMatrix = ElmColumnMajor;
    globalUniformDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd140 : ElpShared;

    globalBufferDefaults.clear();
    globalBufferDefaults.layoutMatrix = ElmColumnMajor;
    globalBufferDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd430 : ElpShared;

    globalInputDefaults.clear();
    globalOutputDefaults.clear();

    globalSharedDefaults.clear();
    globalSharedDefaults.layoutMatrix = ElmColumnMajor;
    globalSharedDefaults.layoutPacking = ElpStd430;

    // "Shaders in the transform
    // feedback capturing mode have an initial global default of
    //     layout(xfb_buffer = 0) out;"
    if (language == EShLangVertex ||
        language == EShLangTessControl ||
        language == EShLangTessEvaluation ||
        language == EShLangGeometry)
        globalOutputDefaults.layoutXfbBuffer = 0;

    if (language == EShLangGeometry)
        globalOutputDefaults.layoutStream = 0;

    if (entryPoint != nullptr && entryPoint->size() > 0 && *entryPoint != "main")
        infoSink.info.message(EPrefixError, "Source entry point must be \"main\"");
}

TParseContext::~TParseContext()
{
    delete [] atomicUintOffsets;
}

// Set up all default precisions as needed by the current environment.
// Intended just as a TParseContext constructor helper.
void TParseContext::setPrecisionDefaults()
{
    // Set all precision defaults to EpqNone, which is correct for all types
    // when not obeying precision qualifiers, and correct for types that don't
    // have defaults (thus getting an error on use) when obeying precision
    // qualifiers.

    for (int type = 0; type < EbtNumTypes; ++type)
        defaultPrecision[type] = EpqNone;

    for (int type = 0; type < maxSamplerIndex; ++type)
        defaultSamplerPrecision[type] = EpqNone;

    // replace with real precision defaults for those that have them
    if (obeyPrecisionQualifiers()) {
        if (isEsProfile()) {
            // Most don't have defaults, a few default to lowp.
            TSampler sampler;
            sampler.set(EbtFloat, Esd2D);
            defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
            sampler.set(EbtFloat, EsdCube);
            defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
            sampler.set(EbtFloat, Esd2D);
            sampler.setExternal(true);
            defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
        }

        // If we are parsing built-in computational variables/functions, it is meaningful to record
        // whether the built-in has no precision qualifier, as that ambiguity
        // is used to resolve the precision from the supplied arguments/operands instead.
        // So, we don't actually want to replace EpqNone with a default precision for built-ins.
        if (! parsingBuiltins) {
            if (isEsProfile() && language == EShLangFragment) {
                defaultPrecision[EbtInt] = EpqMedium;
                defaultPrecision[EbtUint] = EpqMedium;
            } else {
                defaultPrecision[EbtInt] = EpqHigh;
                defaultPrecision[EbtUint] = EpqHigh;
                defaultPrecision[EbtFloat] = EpqHigh;
            }

            if (!isEsProfile()) {
                // Non-ES profile
                // All sampler precisions default to highp.
                for (int type = 0; type < maxSamplerIndex; ++type)
                    defaultSamplerPrecision[type] = EpqHigh;
            }
        }

        defaultPrecision[EbtSampler] = EpqLow;
        defaultPrecision[EbtAtomicUint] = EpqHigh;
    }
}
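
// Illustrative note on the defaults above: under an ES profile, a fragment
// shader gets mediump int/uint but no default for float, so user code must
// still declare one before using float, e.g.
//
//     precision mediump float;
//
// whereas non-ES compilations that obey precision qualifiers default int,
// uint, and float to highp.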

void TParseContext::setLimits(const TBuiltInResource& r)
{
    resources = r;
    intermediate.setLimits(r);

    anyIndexLimits = ! limits.generalAttributeMatrixVectorIndexing ||
                     ! limits.generalConstantMatrixVectorIndexing ||
                     ! limits.generalSamplerIndexing ||
                     ! limits.generalUniformIndexing ||
                     ! limits.generalVariableIndexing ||
                     ! limits.generalVaryingIndexing;


    // "Each binding point tracks its own current default offset for
    // inheritance of subsequent variables using the same binding. The initial state of compilation is that all
    // binding points have an offset of 0."
    atomicUintOffsets = new int[resources.maxAtomicCounterBindings];
    for (int b = 0; b < resources.maxAtomicCounterBindings; ++b)
        atomicUintOffsets[b] = 0;
}

//
// Parse an array of strings using yyparse, going through the
// preprocessor to tokenize the shader strings, then through
// the GLSL scanner.
//
// Returns true for successful acceptance of the shader, false if any errors.
//
bool TParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError)
{
    currentScanner = &input;
    ppContext.setInput(input, versionWillBeError);
    yyparse(this);

    finish();

    return numErrors == 0;
}

// This is called from bison when it has a parse (syntax) error.
// Note though that, to stop cascading errors, we set EOF, which will usually
// cause another syntax error, so report more accurately that compilation is
// terminating instead.
void TParseContext::parserError(const char* s)
{
    if (! getScanner()->atEndOfInput() || numErrors == 0)
        error(getCurrentLoc(), "", "", s, "");
    else
        error(getCurrentLoc(), "compilation terminated", "", "");
}

void TParseContext::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList)
{
    bool createBlock = globalUniformBlock == nullptr;

    if (createBlock) {
        globalUniformBinding = intermediate.getGlobalUniformBinding();
        globalUniformSet = intermediate.getGlobalUniformSet();
    }

    // use base class function to create/expand block
    TParseContextBase::growGlobalUniformBlock(loc, memberType, memberName, typeList);

    if (spvVersion.vulkan > 0 && spvVersion.vulkanRelaxed) {
        // check for a block storage override
        TBlockStorageClass storageOverride = intermediate.getBlockStorageOverride(getGlobalUniformBlockName());
        TQualifier& qualifier = globalUniformBlock->getWritableType().getQualifier();
        qualifier.defaultBlock = true;

        if (storageOverride != EbsNone) {
            if (createBlock) {
                // Remap block storage
                qualifier.setBlockStorage(storageOverride);

                // check that the change didn't create errors
                blockQualifierCheck(loc, qualifier, false);
            }

            // remap member storage as well
            memberType.getQualifier().setBlockStorage(storageOverride);
        }
    }
}

void TParseContext::growAtomicCounterBlock(int binding, const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList)
{
    bool createBlock = atomicCounterBuffers.find(binding) == atomicCounterBuffers.end();

    if (createBlock) {
        atomicCounterBlockSet = intermediate.getAtomicCounterBlockSet();
    }

    // use base class function to create/expand block
    TParseContextBase::growAtomicCounterBlock(binding, loc, memberType, memberName, typeList);
    TQualifier& qualifier = atomicCounterBuffers[binding]->getWritableType().getQualifier();
    qualifier.defaultBlock = true;

    if (spvVersion.vulkan > 0 && spvVersion.vulkanRelaxed) {
        // check for a block storage override
        TBlockStorageClass storageOverride = intermediate.getBlockStorageOverride(getAtomicCounterBlockName());

        if (storageOverride != EbsNone) {
            if (createBlock) {
                // Remap block storage

                qualifier.setBlockStorage(storageOverride);

                // check that the change didn't create errors
                blockQualifierCheck(loc, qualifier, false);
            }

            // remap member storage as well
            memberType.getQualifier().setBlockStorage(storageOverride);
        }
    }
}

const char* TParseContext::getGlobalUniformBlockName() const
{
    const char* name = intermediate.getGlobalUniformBlockName();
    if (std::string(name) == "")
        return "gl_DefaultUniformBlock";
    else
        return name;
}
void TParseContext::finalizeGlobalUniformBlockLayout(TVariable&)
{
}
void TParseContext::setUniformBlockDefaults(TType& block) const
{
    block.getQualifier().layoutPacking = ElpStd140;
    block.getQualifier().layoutMatrix = ElmColumnMajor;
}


const char* TParseContext::getAtomicCounterBlockName() const
{
    const char* name = intermediate.getAtomicCounterBlockName();
    if (std::string(name) == "")
        return "gl_AtomicCounterBlock";
    else
        return name;
}
void TParseContext::finalizeAtomicCounterBlockLayout(TVariable&)
{
}

void TParseContext::setAtomicCounterBlockDefaults(TType& block) const
{
    block.getQualifier().layoutPacking = ElpStd430;
    block.getQualifier().layoutMatrix = ElmRowMajor;
}

void TParseContext::setInvariant(const TSourceLoc& loc, const char* builtin) {
    TSymbol* symbol = symbolTable.find(builtin);
    if (symbol && symbol->getType().getQualifier().isPipeOutput()) {
        if (intermediate.inIoAccessed(builtin))
            warn(loc, "changing qualification after use", "invariant", builtin);
        TSymbol* csymbol = symbolTable.copyUp(symbol);
        csymbol->getWritableType().getQualifier().invariant = true;
    }
}

void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>& tokens)
{
    if (pragmaCallback)
        pragmaCallback(loc.line, tokens);

    if (tokens.size() == 0)
        return;

    if (tokens[0].compare("optimize") == 0) {
        if (tokens.size() != 4) {
            error(loc, "optimize pragma syntax is incorrect", "#pragma", "");
            return;
        }

        if (tokens[1].compare("(") != 0) {
            error(loc, "\"(\" expected after 'optimize' keyword", "#pragma", "");
            return;
        }

        if (tokens[2].compare("on") == 0)
            contextPragma.optimize = true;
        else if (tokens[2].compare("off") == 0)
            contextPragma.optimize = false;
        else {
            if (relaxedErrors())
                // If an implementation does not recognize the tokens following #pragma, then it will ignore that pragma.
                warn(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", "");
            return;
        }

        if (tokens[3].compare(")") != 0) {
            error(loc, "\")\" expected to end 'optimize' pragma", "#pragma", "");
            return;
        }
    } else if (tokens[0].compare("debug") == 0) {
        if (tokens.size() != 4) {
            error(loc, "debug pragma syntax is incorrect", "#pragma", "");
            return;
        }

        if (tokens[1].compare("(") != 0) {
            error(loc, "\"(\" expected after 'debug' keyword", "#pragma", "");
            return;
        }

        if (tokens[2].compare("on") == 0)
            contextPragma.debug = true;
        else if (tokens[2].compare("off") == 0)
            contextPragma.debug = false;
        else {
            if (relaxedErrors())
                // If an implementation does not recognize the tokens following #pragma, then it will ignore that pragma.
                warn(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", "");
            return;
        }

        if (tokens[3].compare(")") != 0) {
            error(loc, "\")\" expected to end 'debug' pragma", "#pragma", "");
            return;
        }
    } else if (spvVersion.spv > 0 && tokens[0].compare("use_storage_buffer") == 0) {
        if (tokens.size() != 1)
            error(loc, "extra tokens", "#pragma", "");
        intermediate.setUseStorageBuffer();
    } else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) {
        if (tokens.size() != 1)
            error(loc, "extra tokens", "#pragma", "");
        intermediate.setUseVulkanMemoryModel();
    } else if (spvVersion.spv > 0 && tokens[0].compare("use_variable_pointers") == 0) {
        if (tokens.size() != 1)
            error(loc, "extra tokens", "#pragma", "");
        if (spvVersion.spv < glslang::EShTargetSpv_1_3)
            error(loc, "requires SPIR-V 1.3", "#pragma use_variable_pointers", "");
        intermediate.setUseVariablePointers();
    } else if (tokens[0].compare("once") == 0) {
        warn(loc, "not implemented", "#pragma once", "");
    } else if (tokens[0].compare("glslang_binary_double_output") == 0) {
        intermediate.setBinaryDoubleOutput();
    } else if (spvVersion.spv > 0 && tokens[0].compare("STDGL") == 0 &&
               tokens[1].compare("invariant") == 0 && tokens[3].compare("all") == 0) {
        intermediate.setInvariantAll();
        // Set all builtin out variables invariant if declared
        setInvariant(loc, "gl_Position");
        setInvariant(loc, "gl_PointSize");
        setInvariant(loc, "gl_ClipDistance");
        setInvariant(loc, "gl_CullDistance");
        setInvariant(loc, "gl_TessLevelOuter");
        setInvariant(loc, "gl_TessLevelInner");
        setInvariant(loc, "gl_PrimitiveID");
        setInvariant(loc, "gl_Layer");
        setInvariant(loc, "gl_ViewportIndex");
        setInvariant(loc, "gl_FragDepth");
        setInvariant(loc, "gl_SampleMask");
        setInvariant(loc, "gl_ClipVertex");
        setInvariant(loc, "gl_FrontColor");
        setInvariant(loc, "gl_BackColor");
        setInvariant(loc, "gl_FrontSecondaryColor");
        setInvariant(loc, "gl_BackSecondaryColor");
        setInvariant(loc, "gl_TexCoord");
        setInvariant(loc, "gl_FogFragCoord");
        setInvariant(loc, "gl_FragColor");
        setInvariant(loc, "gl_FragData");
    }
}
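
// For reference, illustrative pragma forms handled above include:
//
//     #pragma optimize(on)             // or off; sets contextPragma.optimize
//     #pragma debug(off)               // or on;  sets contextPragma.debug
//     #pragma use_storage_buffer       // SPIR-V compilations only
//     #pragma use_vulkan_memory_model  // SPIR-V compilations only
//     #pragma use_variable_pointers    // SPIR-V, requires target SPIR-V 1.3+
//     #pragma once                     // warns: not implemented
//     #pragma STDGL invariant(all)     // marks declared built-in outputs invariant
//
// Pragmas that match none of the branches fall through silently, matching the
// GLSL rule (quoted above) that unrecognized pragmas are ignored.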

//
// Handle seeing a variable identifier in the grammar.
//
TIntermTyped* TParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string)
{
    TIntermTyped* node = nullptr;

    // Error check for requiring specific extensions present.
    if (symbol && symbol->getNumExtensions())
        requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str());

    if (symbol && symbol->isReadOnly()) {
        // All shared things containing an unsized array must be copied up
        // on first use, so that all future references will share its array structure,
        // so that editing the implicit size will affect all nodes consuming it,
        // and so that editing the implicit size won't change the shared one.
        //
        // If this is a variable or a block, check it and all it contains, but if this
        // is a member of an anonymous block, check the whole block, as the whole block
        // will need to be copied up if it contains an unsized array.
        //
        // This check is being done before the block-name check further down, so guard
        // for that too.
        if (!symbol->getType().isUnusableName()) {
            if (symbol->getType().containsUnsizedArray() ||
                (symbol->getAsAnonMember() &&
                 symbol->getAsAnonMember()->getAnonContainer().getType().containsUnsizedArray()))
                makeEditable(symbol);
        }
    }

    const TVariable* variable;
    const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr;
    if (anon) {
        // It was a member of an anonymous container.

        // Create a subtree for its dereference.
        variable = anon->getAnonContainer().getAsVariable();
        TIntermTyped* container = intermediate.addSymbol(*variable, loc);
        TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc);
        node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc);

        node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type);
        if (node->getType().hiddenMember())
            error(loc, "member of nameless block was not redeclared", string->c_str(), "");
    } else {
        // Not a member of an anonymous container.

        // The symbol table search was done in the lexical phase.
        // See if it was a variable.
        variable = symbol ? symbol->getAsVariable() : nullptr;
        if (variable) {
            if (variable->getType().isUnusableName()) {
                error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), "");
                variable = nullptr;
            }

            if (language == EShLangMesh && variable) {
                TLayoutGeometry primitiveType = intermediate.getOutputPrimitive();
                if ((variable->getMangledName() == "gl_PrimitiveTriangleIndicesEXT" && primitiveType != ElgTriangles) ||
                    (variable->getMangledName() == "gl_PrimitiveLineIndicesEXT" && primitiveType != ElgLines) ||
                    (variable->getMangledName() == "gl_PrimitivePointIndicesEXT" && primitiveType != ElgPoints)) {
                    error(loc, "cannot be used (output primitive type mismatch)", string->c_str(), "");
                    variable = nullptr;
                }
            }
        } else {
            if (symbol)
                error(loc, "variable name expected", string->c_str(), "");
        }

        // Recovery, if it wasn't found or was not a variable.
        if (! variable)
            variable = new TVariable(string, TType(EbtVoid));

        if (variable->getType().getQualifier().isFrontEndConstant())
            node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc);
        else
            node = intermediate.addSymbol(*variable, loc);
    }

    if (variable->getType().getQualifier().isIo())
        intermediate.addIoAccessed(*string);

    if (variable->getType().isReference() &&
        variable->getType().getQualifier().bufferReferenceNeedsVulkanMemoryModel()) {
        intermediate.setUseVulkanMemoryModel();
    }

    return node;
}

//
// Handle seeing a base[index] dereference in the grammar.
//
TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index)
{
    int indexValue = 0;
    if (index->getQualifier().isFrontEndConstant())
        indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst();

    // basic type checks...
    variableCheck(base);

    if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat() &&
        ! base->isReference()) {
        if (base->getAsSymbolNode())
            error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), "");
        else
            error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", "");

        // Insert dummy error-recovery result
        return intermediate.addConstantUnion(0.0, EbtFloat, loc);
    }

    if (!base->isArray() && base->isVector()) {
        if (base->getType().contains16BitFloat())
            requireFloat16Arithmetic(loc, "[", "does not operate on types containing float16");
        if (base->getType().contains16BitInt())
            requireInt16Arithmetic(loc, "[", "does not operate on types containing (u)int16");
        if (base->getType().contains8BitInt())
            requireInt8Arithmetic(loc, "[", "does not operate on types containing (u)int8");
    }

    // check for constant folding
    if (base->getType().getQualifier().isFrontEndConstant() && index->getQualifier().isFrontEndConstant()) {
        // both base and index are front-end constants
        checkIndex(loc, base->getType(), indexValue);
        return intermediate.foldDereference(base, indexValue, loc);
    }

    // at least one of base and index is not a front-end constant variable...
    TIntermTyped* result = nullptr;

    if (base->isReference() && ! base->isArray()) {
        requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing");
        if (base->getType().getReferentType()->containsUnsizedArray()) {
            error(loc, "cannot index reference to buffer containing an unsized array", "", "");
            result = nullptr;
        } else {
            result = intermediate.addBinaryMath(EOpAdd, base, index, loc);
            if (result != nullptr)
                result->setType(base->getType());
        }
        if (result == nullptr) {
            error(loc, "cannot index buffer reference", "", "");
            result = intermediate.addConstantUnion(0.0, EbtFloat, loc);
        }
        return result;
    }
    if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
        handleIoResizeArrayAccess(loc, base);

    if (index->getQualifier().isFrontEndConstant())
        checkIndex(loc, base->getType(), indexValue);

    if (index->getQualifier().isFrontEndConstant()) {
        if (base->getType().isUnsizedArray()) {
            base->getWritableType().updateImplicitArraySize(indexValue + 1);
            base->getWritableType().setImplicitlySized(true);
            if (base->getQualifier().builtIn == EbvClipDistance &&
                indexValue >= resources.maxClipDistances) {
                error(loc, "gl_ClipDistance", "[", "array index out of range '%d'", indexValue);
            }
            else if (base->getQualifier().builtIn == EbvCullDistance &&
                indexValue >= resources.maxCullDistances) {
                error(loc, "gl_CullDistance", "[", "array index out of range '%d'", indexValue);
            }
            else if (base->getQualifier().builtIn == EbvSampleMask &&
                indexValue >= (resources.maxSamples + 31) / 32) {
                error(loc, "gl_SampleMask", "[", "array index out of range '%d'", indexValue);
            }
            // For 2D per-view builtin arrays, update the inner dimension size in parent type
            if (base->getQualifier().isPerView() && base->getQualifier().builtIn != EbvNone) {
                TIntermBinary* binaryNode = base->getAsBinaryNode();
                if (binaryNode) {
                    TType& leftType = binaryNode->getLeft()->getWritableType();
                    TArraySizes& arraySizes = *leftType.getArraySizes();
                    assert(arraySizes.getNumDims() == 2);
                    arraySizes.setDimSize(1, std::max(arraySizes.getDimSize(1), indexValue + 1));
                }
            }
        } else
            checkIndex(loc, base->getType(), indexValue);
        result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
    } else {
        if (base->getType().isUnsizedArray()) {
            // we have a variable index into an unsized array, which is okay,
            // depending on the situation
            if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
                error(loc, "", "[", "array must be sized by a redeclaration or layout qualifier before being indexed with a variable");
            else {
                // it is okay for a run-time sized array
                checkRuntimeSizable(loc, *base);
            }
            base->getWritableType().setArrayVariablyIndexed();
        }
        if (base->getBasicType() == EbtBlock) {
            if (base->getQualifier().storage == EvqBuffer)
                requireProfile(base->getLoc(), ~EEsProfile, "variable indexing buffer block array");
            else if (base->getQualifier().storage == EvqUniform)
                profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
                                "variable indexing uniform block array");
            else {
                // input/output blocks either don't exist or can't be variably indexed
            }
        } else if (language == EShLangFragment && base->getQualifier().isPipeOutput() && base->getQualifier().builtIn != EbvSampleMask)
            requireProfile(base->getLoc(), ~EEsProfile, "variable indexing fragment shader output array");
        else if (base->getBasicType() == EbtSampler && version >= 130) {
            const char* explanation = "variable indexing sampler array";
            requireProfile(base->getLoc(), EEsProfile | ECoreProfile | ECompatibilityProfile, explanation);
            profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, explanation);
            profileRequires(base->getLoc(), ECoreProfile | ECompatibilityProfile, 400, nullptr, explanation);
        }

        result = intermediate.addIndex(EOpIndexIndirect, base, index, loc);
    }

    // Insert valid dereferenced result type
    TType newType(base->getType(), 0);
    if (base->getType().getQualifier().isConstant() && index->getQualifier().isConstant()) {
        newType.getQualifier().storage = EvqConst;
        // If base or index is a specialization constant, the result should also be a specialization constant.
        if (base->getType().getQualifier().isSpecConstant() || index->getQualifier().isSpecConstant()) {
            newType.getQualifier().makeSpecConstant();
        }
    } else {
        newType.getQualifier().storage = EvqTemporary;
        newType.getQualifier().specConstant = false;
    }
    result->setType(newType);

    inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier());

    // Propagate nonuniform
    if (base->getQualifier().isNonUniform() || index->getQualifier().isNonUniform())
        result->getWritableType().getQualifier().nonUniform = true;

    if (anyIndexLimits)
        handleIndexLimits(loc, base, index);

    return result;
}
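
// Illustrative GLSL cases for the dereference handling above:
//
//     const ivec3 c = ivec3(1, 2, 3);
//     int x = c[1];        // base and index are front-end constants: folded to 2
//
//     gl_ClipDistance[5];  // constant index into an unsized built-in array: grows the
//                          // implicit size and is range-checked against the resource limit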

// for ES 2.0 (version 100) limitations for almost all index operations except vertex-shader uniforms
void TParseContext::handleIndexLimits(const TSourceLoc& /*loc*/, TIntermTyped* base, TIntermTyped* index)
{
    if ((! limits.generalSamplerIndexing && base->getBasicType() == EbtSampler) ||
        (! limits.generalUniformIndexing && base->getQualifier().isUniformOrBuffer() && language != EShLangVertex) ||
        (! limits.generalAttributeMatrixVectorIndexing && base->getQualifier().isPipeInput() && language == EShLangVertex && (base->getType().isMatrix() || base->getType().isVector())) ||
        (! limits.generalConstantMatrixVectorIndexing && base->getAsConstantUnion()) ||
        (! limits.generalVariableIndexing && ! base->getType().getQualifier().isUniformOrBuffer() &&
                                             ! base->getType().getQualifier().isPipeInput() &&
                                             ! base->getType().getQualifier().isPipeOutput() &&
                                             ! base->getType().getQualifier().isConstant()) ||
        (! limits.generalVaryingIndexing && (base->getType().getQualifier().isPipeInput() ||
                                             base->getType().getQualifier().isPipeOutput()))) {
        // it's too early to know what the inductive variables are, save it for post processing
        needsIndexLimitationChecking.push_back(index);
    }
}

// Make a shared symbol have a non-shared version that can be edited by the current
// compile, such that editing its type will not change the shared version and will
// affect all nodes sharing it.
void TParseContext::makeEditable(TSymbol*& symbol)
{
    TParseContextBase::makeEditable(symbol);

    // See if it's tied to IO resizing
    if (isIoResizeArray(symbol->getType()))
        ioArraySymbolResizeList.push_back(symbol);
}

// Return true if this is a geometry shader input array or tessellation control output array
// or mesh shader output array.
bool TParseContext::isIoResizeArray(const TType& type) const
{
    return type.isArray() &&
           ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) ||
            (language == EShLangTessControl && type.getQualifier().storage == EvqVaryingOut &&
                ! type.getQualifier().patch) ||
            (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn &&
                (type.getQualifier().pervertexNV || type.getQualifier().pervertexEXT)) ||
            (language == EShLangMesh && type.getQualifier().storage == EvqVaryingOut &&
                !type.getQualifier().perTaskNV));
}
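
// For example (illustrative; block and instance names are hypothetical), a
// geometry shader input such as
//
//     in Block { vec4 color; } blk[];   // outer size left implicit
//
// qualifies as an IO-resize array: its outer size is later derived from the
// declared input primitive (see getIoArrayImplicitSize() below).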

// If an array is not isIoResizeArray() but is an io array, make sure it has the right size
void TParseContext::fixIoArraySize(const TSourceLoc& loc, TType& type)
{
    if (! type.isArray() || type.getQualifier().patch || symbolTable.atBuiltInLevel())
        return;

    assert(! isIoResizeArray(type));

    if (type.getQualifier().storage != EvqVaryingIn || type.getQualifier().patch)
        return;

    if (language == EShLangTessControl || language == EShLangTessEvaluation) {
        if (type.getOuterArraySize() != resources.maxPatchVertices) {
            if (type.isSizedArray())
                error(loc, "tessellation input array size must be gl_MaxPatchVertices or implicitly sized", "[]", "");
            type.changeOuterArraySize(resources.maxPatchVertices);
        }
    }
}

// Issue any errors if the non-array object is missing arrayness WRT
// shader I/O that has array requirements.
// All arrayness checking is handled in array paths; this is for the non-array case.
void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
{
    if (! type.isArray() && ! symbolTable.atBuiltInLevel()) {
        if (type.getQualifier().isArrayedIo(language) && !type.getQualifier().layoutPassthrough)
            error(loc, "type must be an array:", type.getStorageQualifierString(), identifier.c_str());
    }
}

// Handle a dereference of a geometry shader input array or tessellation control output array.
// See ioArraySymbolResizeList comment in ParseHelper.h.
//
void TParseContext::handleIoResizeArrayAccess(const TSourceLoc& /*loc*/, TIntermTyped* base)
{
    TIntermSymbol* symbolNode = base->getAsSymbolNode();
    assert(symbolNode);
    if (! symbolNode)
        return;

    // fix array size, if it can be fixed and needs to be fixed (will allow variable indexing)
    if (symbolNode->getType().isUnsizedArray()) {
        int newSize = getIoArrayImplicitSize(symbolNode->getType().getQualifier());
        if (newSize > 0)
            symbolNode->getWritableType().changeOuterArraySize(newSize);
    }
}

// If there has been an input primitive declaration (geometry shader) or an output
// number of vertices declaration (tessellation shader), make sure all input array types
// match it in size. Types come either from nodes in the AST or symbols in the
// symbol table.
//
// Types without an array size will be given one.
// Types already having a size that is wrong will get an error.
//
void TParseContext::checkIoArraysConsistency(const TSourceLoc &loc, bool tailOnly)
{
    int requiredSize = 0;
    TString featureString;
    size_t listSize = ioArraySymbolResizeList.size();
    size_t i = 0;

    // If tailOnly = true, only check the last array symbol in the list.
    if (tailOnly) {
        i = listSize - 1;
    }
    for (bool firstIteration = true; i < listSize; ++i) {
        TType &type = ioArraySymbolResizeList[i]->getWritableType();

        // As I/O array sizes don't change, fetch requiredSize only once,
        // except for mesh shaders which could have different I/O array sizes based on type qualifiers.
        if (firstIteration || (language == EShLangMesh)) {
            requiredSize = getIoArrayImplicitSize(type.getQualifier(), &featureString);
            if (requiredSize == 0)
                break;
            firstIteration = false;
        }

        checkIoArrayConsistency(loc, requiredSize, featureString.c_str(), type,
                                ioArraySymbolResizeList[i]->getName());
    }
}

int TParseContext::getIoArrayImplicitSize(const TQualifier &qualifier, TString *featureString) const
{
    int expectedSize = 0;
    TString str = "unknown";
    unsigned int maxVertices = intermediate.getVertices() != TQualifier::layoutNotSet ? intermediate.getVertices() : 0;

    if (language == EShLangGeometry) {
        expectedSize = TQualifier::mapGeometryToSize(intermediate.getInputPrimitive());
        str = TQualifier::getGeometryString(intermediate.getInputPrimitive());
    }
    else if (language == EShLangTessControl) {
        expectedSize = maxVertices;
        str = "vertices";
    } else if (language == EShLangFragment) {
        // Number of vertices for a fragment shader is always three.
        expectedSize = 3;
        str = "vertices";
    } else if (language == EShLangMesh) {
        unsigned int maxPrimitives =
            intermediate.getPrimitives() != TQualifier::layoutNotSet ? intermediate.getPrimitives() : 0;
        if (qualifier.builtIn == EbvPrimitiveIndicesNV) {
            expectedSize = maxPrimitives * TQualifier::mapGeometryToSize(intermediate.getOutputPrimitive());
            str = "max_primitives*";
            str += TQualifier::getGeometryString(intermediate.getOutputPrimitive());
        }
        else if (qualifier.builtIn == EbvPrimitiveTriangleIndicesEXT || qualifier.builtIn == EbvPrimitiveLineIndicesEXT ||
                 qualifier.builtIn == EbvPrimitivePointIndicesEXT) {
            expectedSize = maxPrimitives;
            str = "max_primitives";
        }
        else if (qualifier.isPerPrimitive()) {
            expectedSize = maxPrimitives;
            str = "max_primitives";
        }
        else {
            expectedSize = maxVertices;
            str = "max_vertices";
        }
    }
    if (featureString)
        *featureString = str;
    return expectedSize;
}
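
// For example, with a geometry shader declaring
//
//     layout(triangles) in;
//
// mapGeometryToSize() yields 3, so unsized geometry-shader inputs get an outer
// size of 3; a tessellation control shader declaring layout(vertices = 4) out;
// similarly yields 4 via intermediate.getVertices().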

void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredSize, const char* feature, TType& type, const TString& name)
{
    if (type.isUnsizedArray())
        type.changeOuterArraySize(requiredSize);
    else if (type.getOuterArraySize() != requiredSize) {
        if (language == EShLangGeometry)
            error(loc, "inconsistent input primitive for array size of", feature, name.c_str());
        else if (language == EShLangTessControl)
            error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str());
        else if (language == EShLangFragment) {
            if (type.getOuterArraySize() > requiredSize)
                error(loc, " cannot be greater than 3 for pervertexEXT", feature, name.c_str());
        }
        else if (language == EShLangMesh)
            error(loc, "inconsistent output array size of", feature, name.c_str());
        else
            assert(0);
    }
}

// Handle seeing a binary node with a math operation.
// Returns nullptr if not semantically allowed.
TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right)
{
    rValueErrorCheck(loc, str, left->getAsTyped());
    rValueErrorCheck(loc, str, right->getAsTyped());

    bool allowed = true;
    switch (op) {
    // TODO: Bring more source language-specific checks up from intermediate.cpp
    // to the specific parse helpers for that source language.
    case EOpLessThan:
    case EOpGreaterThan:
    case EOpLessThanEqual:
    case EOpGreaterThanEqual:
        if (! left->isScalar() || ! right->isScalar())
            allowed = false;
        break;
    default:
        break;
    }

    if (((left->getType().contains16BitFloat() || right->getType().contains16BitFloat()) && !float16Arithmetic()) ||
        ((left->getType().contains16BitInt() || right->getType().contains16BitInt()) && !int16Arithmetic()) ||
        ((left->getType().contains8BitInt() || right->getType().contains8BitInt()) && !int8Arithmetic())) {
        allowed = false;
    }

    TIntermTyped* result = nullptr;
    if (allowed) {
        if ((left->isReference() || right->isReference()))
            requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference math");
        result = intermediate.addBinaryMath(op, left, right, loc);
    }

    if (result == nullptr) {
        bool enhanced = intermediate.getEnhancedMsgs();
        binaryOpError(loc, str, left->getCompleteString(enhanced), right->getCompleteString(enhanced));
    }

    return result;
}
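
// Note on the relational cases above: at this level '<', '>', '<=' and '>='
// are only accepted for scalar operands; component-wise comparison of vectors
// in GLSL is written with the built-ins lessThan(), greaterThan(), etc., which
// are handled as function calls rather than binary operators.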

// Handle seeing a unary node with a math operation.
TIntermTyped* TParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* childNode)
{
    rValueErrorCheck(loc, str, childNode);

    bool allowed = true;
    if ((childNode->getType().contains16BitFloat() && !float16Arithmetic()) ||
        (childNode->getType().contains16BitInt() && !int16Arithmetic()) ||
        (childNode->getType().contains8BitInt() && !int8Arithmetic())) {
        allowed = false;
    }

    TIntermTyped* result = nullptr;
    if (allowed)
        result = intermediate.addUnaryMath(op, childNode, loc);

    if (result)
        return result;
    else {
        bool enhanced = intermediate.getEnhancedMsgs();
        unaryOpError(loc, str, childNode->getCompleteString(enhanced));
    }

    return childNode;
}

//
// Handle seeing a base.field dereference in the grammar.
//
TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
{
    variableCheck(base);

    //
    // .length() can't be resolved until we later see the function-calling syntax.
    // Save away the name in the AST for now. Processing is completed in
    // handleLengthMethod().
    //
    if (field == "length") {
        if (base->isArray()) {
            profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, ".length");
            profileRequires(loc, EEsProfile, 300, nullptr, ".length");
        } else if (base->isVector() || base->isMatrix()) {
            const char* feature = ".length() on vectors and matrices";
            requireProfile(loc, ~EEsProfile, feature);
            profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, feature);
        } else if (!base->getType().isCoopMat()) {
            bool enhanced = intermediate.getEnhancedMsgs();
            error(loc, "does not operate on this type:", field.c_str(), base->getType().getCompleteString(enhanced).c_str());
            return base;
        }

        return intermediate.addMethod(base, TType(EbtInt), &field, loc);
    }

    // It's not .length() if we get to here.

    if (base->isArray()) {
        error(loc, "cannot apply to an array:", ".", field.c_str());

        return base;
    }

    if (base->getType().isCoopMat()) {
        error(loc, "cannot apply to a cooperative matrix type:", ".", field.c_str());
        return base;
    }

    // It's neither an array nor .length() if we get here,
    // leaving swizzles and struct/block dereferences.

    TIntermTyped* result = base;
    if ((base->isVector() || base->isScalar()) &&
        (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) {
        result = handleDotSwizzle(loc, base, field);
    } else if (base->isStruct() || base->isReference()) {
        const TTypeList* fields = base->isReference() ?
                                  base->getType().getReferentType()->getStruct() :
                                  base->getType().getStruct();
        bool fieldFound = false;
        int member;
        for (member = 0; member < (int)fields->size(); ++member) {
            if ((*fields)[member].type->getFieldName() == field) {
                fieldFound = true;
                break;
            }
        }

        if (fieldFound) {
            if (spvVersion.vulkan != 0 && spvVersion.vulkanRelaxed)
                result = vkRelaxedRemapDotDereference(loc, *base, *(*fields)[member].type, field);

            if (result == base)
            {
                if (base->getType().getQualifier().isFrontEndConstant())
                    result = intermediate.foldDereference(base, member, loc);
                else {
                    blockMemberExtensionCheck(loc, base, member, field);
                    TIntermTyped* index = intermediate.addConstantUnion(member, loc);
                    result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc);
                    result->setType(*(*fields)[member].type);
                    if ((*fields)[member].type->getQualifier().isIo())
                        intermediate.addIoAccessed(field);
                }
            }

            inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier());
        } else {
            auto baseSymbol = base;
            while (baseSymbol->getAsSymbolNode() == nullptr) {
                auto binaryNode = baseSymbol->getAsBinaryNode();
                if (binaryNode == nullptr) break;
                baseSymbol = binaryNode->getLeft();
            }
            if (baseSymbol->getAsSymbolNode() != nullptr) {
                TString structName;
                structName.append("\'").append(baseSymbol->getAsSymbolNode()->getName().c_str()).append("\'");
                error(loc, "no such field in structure", field.c_str(), structName.c_str());
            } else {
                error(loc, "no such field in structure", field.c_str(), "");
            }
        }
    } else
        error(loc, "does not apply to this type:", field.c_str(),
              base->getType().getCompleteString(intermediate.getEnhancedMsgs()).c_str());

    // Propagate noContraction up the dereference chain
    if (base->getQualifier().isNoContraction())
        result->getWritableType().getQualifier().setNoContraction();

    // Propagate nonuniform
    if (base->getQualifier().isNonUniform())
        result->getWritableType().getQualifier().nonUniform = true;

    return result;
}
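
// Illustrative GLSL uses of the ".length()" handling above:
//
//     float a[7];
//     a.length();    // array length: needs version 120 with GL_3DL_array_objects,
//                    // or ES version 300 and up
//     mat3 m;
//     m.length();    // vectors/matrices: desktop-only, version 420 or
//                    // GL_ARB_shading_language_420pack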

//
// Handle seeing a base.swizzle, a subset of base.identifier in the grammar.
//
TIntermTyped* TParseContext::handleDotSwizzle(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
{
    TIntermTyped* result = base;
    if (base->isScalar()) {
        const char* dotFeature = "scalar swizzle";
        requireProfile(loc, ~EEsProfile, dotFeature);
        profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature);
    }

    TSwizzleSelectors<TVectorSelector> selectors;
    parseSwizzleSelector(loc, field, base->getVectorSize(), selectors);

    if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitFloat())
        requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16");
    if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt())
        requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16");
    if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt())
        requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8");

    if (base->isScalar()) {
        if (selectors.size() == 1)
            return result;
        else {
            TType type(base->getBasicType(), EvqTemporary, selectors.size());
            // Swizzle operations propagate specialization-constantness
            if (base->getQualifier().isSpecConstant())
                type.getQualifier().makeSpecConstant();
            return addConstructor(loc, base, type);
        }
    }

    if (base->getType().getQualifier().isFrontEndConstant())
        result = intermediate.foldSwizzle(base, selectors, loc);
    else {
        if (selectors.size() == 1) {
            TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc);
            result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
            result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision));
        } else {
            TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
            result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc);
            result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size()));
        }
        // Swizzle operations propagate specialization-constantness
        if (base->getType().getQualifier().isSpecConstant())
            result->getWritableType().getQualifier().makeSpecConstant();
    }

    return result;
}
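
// Illustrative swizzles for the handling above:
//
//     vec4 v;
//     v.xyz;    // multi-component swizzle -> EOpVectorSwizzle
//     v.y;      // single component        -> plain EOpIndexDirect
//     float f;
//     f.xx;     // scalar swizzle: desktop-only, version 420 or
//               // GL_ARB_shading_language_420pack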
1103
1104
void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName)
1105
{
1106
// a block that needs extension checking is either 'base', or if arrayed,
1107
// one level removed to the left
1108
const TIntermSymbol* baseSymbol = nullptr;
1109
if (base->getAsBinaryNode() == nullptr)
1110
baseSymbol = base->getAsSymbolNode();
1111
else
1112
baseSymbol = base->getAsBinaryNode()->getLeft()->getAsSymbolNode();
1113
if (baseSymbol == nullptr)
1114
return;
1115
const TSymbol* symbol = symbolTable.find(baseSymbol->getName());
1116
if (symbol == nullptr)
1117
return;
1118
const TVariable* variable = symbol->getAsVariable();
1119
if (variable == nullptr)
1120
return;
1121
if (!variable->hasMemberExtensions())
1122
return;
1123
1124
// We now have a variable that is the base of a dot reference
1125
// with members that need extension checking.
1126
if (variable->getNumMemberExtensions(member) > 0)
1127
requireExtensions(loc, variable->getNumMemberExtensions(member), variable->getMemberExtensions(member), memberName.c_str());
1128
}
1129
1130
//
1131
// Handle seeing a function declarator in the grammar. This is the precursor
1132
// to recognizing a function prototype or function definition.
1133
//
1134
TFunction* TParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype)
1135
{
1136
// ES can't declare prototypes inside functions
1137
if (! symbolTable.atGlobalLevel())
1138
requireProfile(loc, ~EEsProfile, "local function declaration");
1139
1140
//
1141
// Multiple declarations of the same function name are allowed.
1142
//
1143
// If this is a definition, the definition production code will check for redefinitions
1144
// (we don't know at this point if it's a definition or not).
1145
//
1146
// Redeclarations (full signature match) are allowed. But, return types and parameter qualifiers must also match.
1147
// - except ES 100, which only allows a single prototype
1148
//
1149
// ES 100 does not allow redefining, but does allow overloading of built-in functions.
1150
// ES 300 does not allow redefining or overloading of built-in functions.
1151
//
1152
bool builtIn;
1153
TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn);
1154
if (symbol && symbol->getAsFunction() && builtIn)
1155
requireProfile(loc, ~EEsProfile, "redefinition of built-in function");
1156
// Check the validity of using spirv_literal qualifier
1157
for (int i = 0; i < function.getParamCount(); ++i) {
1158
if (function[i].type->getQualifier().isSpirvLiteral() && function.getBuiltInOp() != EOpSpirvInst)
1159
error(loc, "'spirv_literal' can only be used on functions defined with 'spirv_instruction' for argument",
1160
function.getName().c_str(), "%d", i + 1);
1161
}
1162
1163
// For function declaration with SPIR-V instruction qualifier, always ignore the built-in function and
1164
// respect this redeclared one.
1165
if (symbol && builtIn && function.getBuiltInOp() == EOpSpirvInst)
1166
symbol = nullptr;
1167
const TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr;
1168
if (prevDec) {
1169
if (prevDec->isPrototyped() && prototype)
1170
profileRequires(loc, EEsProfile, 300, nullptr, "multiple prototypes for same function");
1171
if (prevDec->getType() != function.getType())
1172
error(loc, "overloaded functions must have the same return type", function.getName().c_str(), "");
1173
if (prevDec->getSpirvInstruction() != function.getSpirvInstruction()) {
1174
error(loc, "overloaded functions must have the same qualifiers", function.getName().c_str(),
1175
"spirv_instruction");
1176
}
1177
for (int i = 0; i < prevDec->getParamCount(); ++i) {
1178
if ((*prevDec)[i].type->getQualifier().storage != function[i].type->getQualifier().storage)
1179
error(loc, "overloaded functions must have the same parameter storage qualifiers for argument", function[i].type->getStorageQualifierString(), "%d", i+1);
1180
1181
if ((*prevDec)[i].type->getQualifier().precision != function[i].type->getQualifier().precision)
1182
error(loc, "overloaded functions must have the same parameter precision qualifiers for argument", function[i].type->getPrecisionQualifierString(), "%d", i+1);
1183
}
1184
}
1185
1186
arrayObjectCheck(loc, function.getType(), "array in function return type");
1187
1188
if (prototype) {
1189
// All built-in functions are defined, even though they don't have a body.
1190
// Count their prototype as a definition instead.
1191
if (symbolTable.atBuiltInLevel())
1192
function.setDefined();
1193
else {
1194
if (prevDec && ! builtIn)
1195
symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const
1196
function.setPrototyped();
1197
}
1198
}
1199
1200
// This insert won't actually insert it if it's a duplicate signature, but it will still check for
1201
// other forms of name collisions.
1202
if (! symbolTable.insert(function))
1203
error(loc, "function name is redeclaration of existing name", function.getName().c_str(), "");
1204
1205
//
1206
// If this is a redeclaration, it could also be a definition,
1207
// in which case, we need to use the parameter names from this one, and not the one that's
1208
// being redeclared. So, pass back this declaration, not the one in the symbol table.
1209
//
1210
return &function;
1211
}
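
// For example (illustrative prototypes, hypothetical names), the checks above
// would treat the following as:
//
//     float f(int x);
//     float f(int x);      // second prototype: under the ES profile requires version 300
//     int   f(int x);      // flagged: overloads must have the same return type
//     float f(out int x);  // flagged: parameter storage qualifiers must match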

//
// Handle seeing the function prototype in front of a function definition in the grammar.
// The body is handled after this function returns.
//
TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function)
{
    currentCaller = function.getMangledName();
    TSymbol* symbol = symbolTable.find(function.getMangledName());
    TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr;

    if (! prevDec)
        error(loc, "can't find function", function.getName().c_str(), "");
    // Note: 'prevDec' could be 'function' if this is the first time we've seen function
    // as it would have just been put in the symbol table. Otherwise, we're looking up
    // an earlier occurrence.

    if (prevDec && prevDec->isDefined()) {
        // Then this function already has a body.
        error(loc, "function already has a body", function.getName().c_str(), "");
    }
    if (prevDec && ! prevDec->isDefined()) {
        prevDec->setDefined();

        // Remember the return type for later checking for RETURN statements.
        currentFunctionType = &(prevDec->getType());
    } else
        currentFunctionType = new TType(EbtVoid);
    functionReturnsValue = false;

    // Check for entry point
    if (function.getName().compare(intermediate.getEntryPointName().c_str()) == 0) {
        intermediate.setEntryPointMangledName(function.getMangledName().c_str());
        intermediate.incrementEntryPointCount();
        inMain = true;
    } else
        inMain = false;

    //
    // Raise error message if main function takes any parameters or returns anything other than void
    //
    if (inMain) {
        if (function.getParamCount() > 0)
            error(loc, "function cannot take any parameter(s)", function.getName().c_str(), "");
        if (function.getType().getBasicType() != EbtVoid)
            error(loc, "", function.getType().getBasicTypeString().c_str(), "entry point cannot return a value");
        if (function.getLinkType() != ELinkNone)
            error(loc, "main function cannot be exported", "", "");
    }

    //
    // New symbol table scope for body of function plus its arguments
    //
    symbolTable.push();

    //
    // Insert parameters into the symbol table.
    // If the parameter has no name, it's not an error, just don't insert it
    // (could be used for unused args).
    //
    // Also, accumulate the list of parameters into the HIL, so lower level code
    // knows where to find parameters.
    //
    TIntermAggregate* paramNodes = new TIntermAggregate;
    for (int i = 0; i < function.getParamCount(); i++) {
        TParameter& param = function[i];
        if (param.name != nullptr) {
            TVariable *variable = new TVariable(param.name, *param.type);

            // Insert the parameters with name in the symbol table.
            if (! symbolTable.insert(*variable))
                error(loc, "redefinition", variable->getName().c_str(), "");
            else {
                // Transfer ownership of name pointer to symbol table.
                param.name = nullptr;

                // Add the parameter to the HIL
                paramNodes = intermediate.growAggregate(paramNodes,
                                                        intermediate.addSymbol(*variable, loc),
                                                        loc);
            }
        } else
            paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc);
    }
    paramNodes->setLinkType(function.getLinkType());
    intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc);
    loopNestingLevel = 0;
    statementNestingLevel = 0;
    controlFlowNestingLevel = 0;
    postEntryPointReturn = false;

    return paramNodes;
}
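
// Consequence of the entry-point checks above (illustrative): the source entry
// point must look like
//
//     void main() { ... }
//
// a 'main' that declares parameters or a non-void return type is reported as an error.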
1305
1306
//
1307
// Handle seeing function call syntax in the grammar, which could be any of
1308
// - .length() method
1309
// - constructor
1310
// - a call to a built-in function mapped to an operator
1311
// - a call to a built-in function that will remain a function call (e.g., texturing)
1312
// - user function
1313
// - subroutine call (not implemented yet)
1314
//
1315
TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments)
1316
{
1317
TIntermTyped* result = nullptr;
1318
1319
if (spvVersion.vulkan != 0 && spvVersion.vulkanRelaxed) {
1320
// allow calls that are invalid in Vulkan Semantics to be invisibily
1321
// remapped to equivalent valid functions
1322
result = vkRelaxedRemapFunctionCall(loc, function, arguments);
1323
if (result)
1324
return result;
1325
}
1326
1327
if (function->getBuiltInOp() == EOpArrayLength)
1328
result = handleLengthMethod(loc, function, arguments);
1329
else if (function->getBuiltInOp() != EOpNull) {
1330
//
1331
// Then this should be a constructor.
1332
// Don't go through the symbol table for constructors.
1333
// Their parameters will be verified algorithmically.
1334
//
1335
TType type(EbtVoid); // use this to get the type back
1336
if (! constructorError(loc, arguments, *function, function->getBuiltInOp(), type)) {
1337
//
1338
// It's a constructor, of type 'type'.
1339
//
1340
result = addConstructor(loc, arguments, type);
1341
if (result == nullptr)
1342
error(loc, "cannot construct with these arguments", type.getCompleteString(intermediate.getEnhancedMsgs()).c_str(), "");
1343
}
1344
} else {
1345
//
1346
// Find it in the symbol table.
1347
//
1348
const TFunction* fnCandidate;
1349
bool builtIn {false};
1350
fnCandidate = findFunction(loc, *function, builtIn);
1351
if (fnCandidate) {
1352
// This is a declared function that might map to
1353
// - a built-in operator,
1354
// - a built-in function not mapped to an operator, or
1355
// - a user function.
1356
1357
// Error check for a function requiring specific extensions present.
1358
if (builtIn &&
1359
(fnCandidate->getBuiltInOp() == EOpSubgroupQuadAll || fnCandidate->getBuiltInOp() == EOpSubgroupQuadAny))
1360
requireExtensions(loc, 1, &E_GL_EXT_shader_quad_control, fnCandidate->getName().c_str());
1361
1362
if (builtIn && fnCandidate->getNumExtensions())
1363
requireExtensions(loc, fnCandidate->getNumExtensions(), fnCandidate->getExtensions(), fnCandidate->getName().c_str());
1364
1365
if (builtIn && fnCandidate->getType().contains16BitFloat())
1366
requireFloat16Arithmetic(loc, "built-in function", "float16 types can only be in uniform block or buffer storage");
1367
if (builtIn && fnCandidate->getType().contains16BitInt())
1368
requireInt16Arithmetic(loc, "built-in function", "(u)int16 types can only be in uniform block or buffer storage");
1369
if (builtIn && fnCandidate->getType().contains8BitInt())
1370
requireInt8Arithmetic(loc, "built-in function", "(u)int8 types can only be in uniform block or buffer storage");
1371
if (builtIn && (fnCandidate->getBuiltInOp() == EOpTextureFetch || fnCandidate->getBuiltInOp() == EOpTextureQuerySize)) {
1372
if ((*fnCandidate)[0].type->getSampler().isMultiSample() && version <= 140)
1373
requireExtensions(loc, 1, &E_GL_ARB_texture_multisample, fnCandidate->getName().c_str());
1374
}
1375
if (arguments != nullptr) {
1376
// Make sure qualifications work for these arguments.
1377
TIntermAggregate* aggregate = arguments->getAsAggregate();
1378
for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
1379
// At this early point there is a slight ambiguity between whether an aggregate 'arguments'
1380
// is the single argument itself or its children are the arguments. Only one argument
1381
// means take 'arguments' itself as the one argument.
1382
TIntermNode* arg = fnCandidate->getParamCount() == 1 ? arguments : (aggregate ? aggregate->getSequence()[i] : arguments);
1383
TQualifier& formalQualifier = (*fnCandidate)[i].type->getQualifier();
1384
if (formalQualifier.isParamOutput()) {
1385
if (lValueErrorCheck(arguments->getLoc(), "assign", arg->getAsTyped()))
1386
error(arguments->getLoc(), "Non-L-value cannot be passed for 'out' or 'inout' parameters.", "out", "");
1387
}
1388
if (formalQualifier.isSpirvLiteral()) {
1389
if (!arg->getAsTyped()->getQualifier().isFrontEndConstant()) {
1390
error(arguments->getLoc(),
1391
"Non front-end constant expressions cannot be passed for 'spirv_literal' parameters.",
1392
"spirv_literal", "");
1393
}
1394
}
1395
const TType& argType = arg->getAsTyped()->getType();
1396
const TQualifier& argQualifier = argType.getQualifier();
1397
bool containsBindlessSampler = intermediate.getBindlessMode() && argType.containsSampler();
1398
if (argQualifier.isMemory() && !containsBindlessSampler && (argType.containsOpaque() || argType.isReference())) {
1399
const char* message = "argument cannot drop memory qualifier when passed to formal parameter";
1400
if (argQualifier.volatil && ! formalQualifier.volatil)
1401
error(arguments->getLoc(), message, "volatile", "");
1402
if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
1403
error(arguments->getLoc(), message, "coherent", "");
1404
if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
1405
error(arguments->getLoc(), message, "devicecoherent", "");
1406
if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
1407
error(arguments->getLoc(), message, "queuefamilycoherent", "");
1408
if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
1409
error(arguments->getLoc(), message, "workgroupcoherent", "");
1410
if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
1411
error(arguments->getLoc(), message, "subgroupcoherent", "");
1412
if (argQualifier.readonly && ! formalQualifier.readonly)
1413
error(arguments->getLoc(), message, "readonly", "");
1414
if (argQualifier.writeonly && ! formalQualifier.writeonly)
1415
error(arguments->getLoc(), message, "writeonly", "");
1416
// Don't check 'restrict'; it is different from the rest:
1417
// "...but only restrict can be taken away from a calling argument, by a formal parameter that
1418
// lacks the restrict qualifier..."
1419
}
1420
if (!builtIn && argQualifier.getFormat() != formalQualifier.getFormat()) {
1421
// we have mismatched formats, which should only be allowed if writeonly
1422
// and at least one format is unknown
1423
if (!formalQualifier.isWriteOnly() || (formalQualifier.getFormat() != ElfNone &&
1424
argQualifier.getFormat() != ElfNone))
1425
error(arguments->getLoc(), "image formats must match", "format", "");
1426
}
1427
if (builtIn && arg->getAsTyped()->getType().contains16BitFloat())
1428
requireFloat16Arithmetic(arguments->getLoc(), "built-in function", "float16 types can only be in uniform block or buffer storage");
1429
if (builtIn && arg->getAsTyped()->getType().contains16BitInt())
1430
requireInt16Arithmetic(arguments->getLoc(), "built-in function", "(u)int16 types can only be in uniform block or buffer storage");
1431
if (builtIn && arg->getAsTyped()->getType().contains8BitInt())
1432
requireInt8Arithmetic(arguments->getLoc(), "built-in function", "(u)int8 types can only be in uniform block or buffer storage");
1433
1434
// TODO 4.5 functionality: A shader will fail to compile
1435
// if the value passed to the mem argument of an atomic memory function does not correspond to a buffer or
1436
// shared variable. It is acceptable to pass an element of an array or a single component of a vector to the
1437
// mem argument of an atomic memory function, as long as the underlying array or vector is a buffer or
1438
// shared variable.
1439
}
1440
1441
// Convert 'in' arguments
1442
addInputArgumentConversions(*fnCandidate, arguments); // arguments may be modified if it's just a single argument node
1443
}
1444
1445
if (builtIn && fnCandidate->getBuiltInOp() != EOpNull) {
1446
// A function call mapped to a built-in operation.
1447
result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate);
1448
} else if (fnCandidate->getBuiltInOp() == EOpSpirvInst) {
1449
// When SPIR-V instruction qualifier is specified, the function call is still mapped to a built-in operation.
1450
result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate);
1451
} else {
1452
// This is a function call not mapped to built-in operator.
1453
// It could still be a built-in function, but only if PureOperatorBuiltins == false.
1454
result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc);
1455
TIntermAggregate* call = result->getAsAggregate();
1456
call->setName(fnCandidate->getMangledName());
1457
1458
// This is how we know whether the given function is built-in or user-defined:
1459
// if builtIn == false, it's user-defined (though it may overload a built-in function);
1460
// if builtIn == true, it's definitely a built-in function with EOpNull.
1461
if (! builtIn) {
1462
call->setUserDefined();
1463
if (symbolTable.atGlobalLevel()) {
1464
requireProfile(loc, ~EEsProfile, "calling user function from global scope");
1465
intermediate.addToCallGraph(infoSink, "main(", fnCandidate->getMangledName());
1466
} else
1467
intermediate.addToCallGraph(infoSink, currentCaller, fnCandidate->getMangledName());
1468
}
1469
1470
if (builtIn)
1471
nonOpBuiltInCheck(loc, *fnCandidate, *call);
1472
else
1473
userFunctionCallCheck(loc, *call);
1474
}
1475
1476
// Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore.
1477
// Built-ins with a single argument aren't called with an aggregate, but they also don't have an output.
1478
// Also, build the qualifier list for user function calls, which are always called with an aggregate.
1479
if (result->getAsAggregate()) {
1480
TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList();
1481
for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
1482
TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage;
1483
qualifierList.push_back(qual);
1484
}
1485
result = addOutputArgumentConversions(*fnCandidate, *result->getAsAggregate());
1486
}
1487
1488
if (result->getAsTyped()->getType().isCoopMat() &&
1489
!result->getAsTyped()->getType().isParameterized()) {
1490
assert(fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAdd ||
1491
fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAddNV);
1492
1493
result->setType(result->getAsAggregate()->getSequence()[2]->getAsTyped()->getType());
1494
}
1495
}
1496
}
1497
1498
// generic error recovery
1499
// TODO: simplification: localize all the error recoveries that look like this, taking type into account to reduce cascades
1500
if (result == nullptr)
1501
result = intermediate.addConstantUnion(0.0, EbtFloat, loc);
1502
1503
return result;
1504
}
1505
1506
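// Build the operator node for a call that matched a built-in mapped to an operator:
// check the call-site location, create the node, compute built-in precisions when precision
// qualifiers are respected, and apply any SPIR-V instruction qualifiers (EOpSpirvInst).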
TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNode* arguments,
1507
const TFunction& function)
1508
{
1509
checkLocation(loc, function.getBuiltInOp());
1510
TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(),
1511
function.getParamCount() == 1,
1512
arguments, function.getType());
1513
if (result != nullptr && obeyPrecisionQualifiers())
1514
computeBuiltinPrecisions(*result, function);
1515
1516
if (result == nullptr) {
1517
if (arguments == nullptr)
1518
error(loc, " wrong operand type", "Internal Error",
1519
"built in unary operator function. Type: %s", "");
1520
else
1521
error(arguments->getLoc(), " wrong operand type", "Internal Error",
1522
"built in unary operator function. Type: %s",
1523
static_cast<TIntermTyped*>(arguments)->getCompleteString(intermediate.getEnhancedMsgs()).c_str());
1524
} else if (result->getAsOperator())
1525
builtInOpCheck(loc, function, *result->getAsOperator());
1526
1527
// Special handling for function call with SPIR-V instruction qualifier specified
1528
if (function.getBuiltInOp() == EOpSpirvInst) {
1529
if (auto agg = result->getAsAggregate()) {
1530
// Propagate spirv_by_reference/spirv_literal from parameters to arguments
1531
auto& sequence = agg->getSequence();
1532
for (unsigned i = 0; i < sequence.size(); ++i) {
1533
if (function[i].type->getQualifier().isSpirvByReference())
1534
sequence[i]->getAsTyped()->getQualifier().setSpirvByReference();
1535
if (function[i].type->getQualifier().isSpirvLiteral())
1536
sequence[i]->getAsTyped()->getQualifier().setSpirvLiteral();
1537
}
1538
1539
// Attach the function call to the SPIR-V instruction
1540
agg->setSpirvInstruction(function.getSpirvInstruction());
1541
} else if (auto unaryNode = result->getAsUnaryNode()) {
1542
// Propagate spirv_by_reference/spirv_literal from parameters to arguments
1543
if (function[0].type->getQualifier().isSpirvByReference())
1544
unaryNode->getOperand()->getQualifier().setSpirvByReference();
1545
if (function[0].type->getQualifier().isSpirvLiteral())
1546
unaryNode->getOperand()->getQualifier().setSpirvLiteral();
1547
1548
// Attach the function call to the SPIR-V instruction
1549
unaryNode->setSpirvInstruction(function.getSpirvInstruction());
1550
} else
1551
assert(0);
1552
}
1553
1554
return result;
1555
}
1556
1557
// "The operation of a built-in function can have a different precision
1558
// qualification than the precision qualification of the resulting value.
1559
// These two precision qualifications are established as follows.
1560
//
1561
// The precision qualification of the operation of a built-in function is
1562
// based on the precision qualification of its input arguments and formal
1563
// parameters: When a formal parameter specifies a precision qualifier,
1564
// that is used, otherwise, the precision qualification of the calling
1565
// argument is used. The highest precision of these will be the precision
1566
// qualification of the operation of the built-in function. Generally,
1567
// this is applied across all arguments to a built-in function, with the
1568
// exceptions being:
1569
// - bitfieldExtract and bitfieldInsert ignore the 'offset' and 'bits'
1570
// arguments.
1571
// - interpolateAt* functions only look at the 'interpolant' argument.
1572
//
1573
// The precision qualification of the result of a built-in function is
1574
// determined in one of the following ways:
1575
//
1576
// - For the texture sampling, image load, and image store functions,
1577
// the precision of the return type matches the precision of the
1578
// sampler type
1579
//
1580
// Otherwise:
1581
//
1582
// - For prototypes that do not specify a resulting precision qualifier,
1583
// the precision will be the same as the precision of the operation.
1584
//
1585
// - For prototypes that do specify a resulting precision qualifier,
1586
// the specified precision qualifier is the precision qualification of
1587
// the result."
1588
//
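// Illustrative example (assuming an ES-style profile where precision is respected and that
// clamp()'s prototype declares no precision qualifiers):
//   mediump float x; highp float y;
//   float r = clamp(x, y, 0.5);
// The operation precision is highp (the highest among the calling arguments), and because the
// prototype specifies no result precision, the result precision is also highp.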
1589
void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction& function)
1590
{
1591
TPrecisionQualifier operationPrecision = EpqNone;
1592
TPrecisionQualifier resultPrecision = EpqNone;
1593
1594
TIntermOperator* opNode = node.getAsOperator();
1595
if (opNode == nullptr)
1596
return;
1597
1598
if (TIntermUnary* unaryNode = node.getAsUnaryNode()) {
1599
operationPrecision = std::max(function[0].type->getQualifier().precision,
1600
unaryNode->getOperand()->getType().getQualifier().precision);
1601
if (function.getType().getBasicType() != EbtBool)
1602
resultPrecision = function.getType().getQualifier().precision == EpqNone ?
1603
operationPrecision :
1604
function.getType().getQualifier().precision;
1605
} else if (TIntermAggregate* agg = node.getAsAggregate()) {
1606
TIntermSequence& sequence = agg->getSequence();
1607
unsigned int numArgs = (unsigned int)sequence.size();
1608
switch (agg->getOp()) {
1609
case EOpBitfieldExtract:
1610
numArgs = 1;
1611
break;
1612
case EOpBitfieldInsert:
1613
numArgs = 2;
1614
break;
1615
case EOpInterpolateAtCentroid:
1616
case EOpInterpolateAtOffset:
1617
case EOpInterpolateAtSample:
1618
numArgs = 1;
1619
break;
1620
case EOpDebugPrintf:
1621
numArgs = 0;
1622
break;
1623
default:
1624
break;
1625
}
1626
// find the maximum precision from the arguments and parameters
1627
for (unsigned int arg = 0; arg < numArgs; ++arg) {
1628
operationPrecision = std::max(operationPrecision, sequence[arg]->getAsTyped()->getQualifier().precision);
1629
operationPrecision = std::max(operationPrecision, function[arg].type->getQualifier().precision);
1630
}
1631
// compute the result precision
1632
if (agg->isSampling() ||
1633
agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore ||
1634
agg->getOp() == EOpImageLoadLod || agg->getOp() == EOpImageStoreLod)
1635
resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision;
1636
else if (function.getType().getBasicType() != EbtBool)
1637
resultPrecision = function.getType().getQualifier().precision == EpqNone ?
1638
operationPrecision :
1639
function.getType().getQualifier().precision;
1640
}
1641
1642
// Propagate precision through this node and its children. That algorithm stops
1643
// when a precision is found, so start by clearing this subroot precision
1644
opNode->getQualifier().precision = EpqNone;
1645
if (operationPrecision != EpqNone) {
1646
opNode->propagatePrecision(operationPrecision);
1647
opNode->setOperationPrecision(operationPrecision);
1648
}
1649
// Now, set the result precision, which might not match
1650
opNode->getQualifier().precision = resultPrecision;
1651
}
1652
1653
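// Process a 'return' statement: check the value against the current function's return type,
// adding an implicit conversion where the language allows it, and build the branch node.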
TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value)
1654
{
1655
storage16BitAssignmentCheck(loc, value->getType(), "return");
1656
1657
functionReturnsValue = true;
1658
TIntermBranch* branch = nullptr;
1659
if (currentFunctionType->getBasicType() == EbtVoid) {
1660
error(loc, "void function cannot return a value", "return", "");
1661
branch = intermediate.addBranch(EOpReturn, loc);
1662
} else if (*currentFunctionType != value->getType()) {
1663
TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value);
1664
if (converted) {
1665
if (*currentFunctionType != converted->getType())
1666
error(loc, "cannot convert return value to function return type", "return", "");
1667
if (version < 420)
1668
warn(loc, "type conversion on return values was not explicitly allowed until version 420",
1669
"return", "");
1670
branch = intermediate.addBranch(EOpReturn, converted, loc);
1671
} else {
1672
error(loc, "type does not match, or is not convertible to, the function's return type", "return", "");
1673
branch = intermediate.addBranch(EOpReturn, value, loc);
1674
}
1675
} else {
1676
if (value->getType().isTexture() || value->getType().isImage()) {
1677
if (spvVersion.spv != 0)
1678
error(loc, "sampler or image cannot be used as return type when generating SPIR-V", "return", "");
1679
else if (!extensionTurnedOn(E_GL_ARB_bindless_texture))
1680
error(loc, "sampler or image can be used as return type only when the extension GL_ARB_bindless_texture enabled", "return", "");
1681
}
1682
branch = intermediate.addBranch(EOpReturn, value, loc);
1683
}
1684
branch->updatePrecision(currentFunctionType->getQualifier().precision);
1685
return branch;
1686
}
1687
1688
// See if the operation is being done in an illegal location.
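// For example, in a tessellation control shader, 'if (c) barrier();' or a barrier() outside
// main() is rejected here (illustrative of the checks below).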
1689
void TParseContext::checkLocation(const TSourceLoc& loc, TOperator op)
1690
{
1691
switch (op) {
1692
case EOpBarrier:
1693
if (language == EShLangTessControl) {
1694
if (controlFlowNestingLevel > 0)
1695
error(loc, "tessellation control barrier() cannot be placed within flow control", "", "");
1696
if (! inMain)
1697
error(loc, "tessellation control barrier() must be in main()", "", "");
1698
else if (postEntryPointReturn)
1699
error(loc, "tessellation control barrier() cannot be placed after a return from main()", "", "");
1700
}
1701
break;
1702
case EOpBeginInvocationInterlock:
1703
if (language != EShLangFragment)
1704
error(loc, "beginInvocationInterlockARB() must be in a fragment shader", "", "");
1705
if (! inMain)
1706
error(loc, "beginInvocationInterlockARB() must be in main()", "", "");
1707
else if (postEntryPointReturn)
1708
error(loc, "beginInvocationInterlockARB() cannot be placed after a return from main()", "", "");
1709
if (controlFlowNestingLevel > 0)
1710
error(loc, "beginInvocationInterlockARB() cannot be placed within flow control", "", "");
1711
1712
if (beginInvocationInterlockCount > 0)
1713
error(loc, "beginInvocationInterlockARB() must only be called once", "", "");
1714
if (endInvocationInterlockCount > 0)
1715
error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", "");
1716
1717
beginInvocationInterlockCount++;
1718
1719
// default to pixel_interlock_ordered
1720
if (intermediate.getInterlockOrdering() == EioNone)
1721
intermediate.setInterlockOrdering(EioPixelInterlockOrdered);
1722
break;
1723
case EOpEndInvocationInterlock:
1724
if (language != EShLangFragment)
1725
error(loc, "endInvocationInterlockARB() must be in a fragment shader", "", "");
1726
if (! inMain)
1727
error(loc, "endInvocationInterlockARB() must be in main()", "", "");
1728
else if (postEntryPointReturn)
1729
error(loc, "endInvocationInterlockARB() cannot be placed after a return from main()", "", "");
1730
if (controlFlowNestingLevel > 0)
1731
error(loc, "endInvocationInterlockARB() cannot be placed within flow control", "", "");
1732
1733
if (endInvocationInterlockCount > 0)
1734
error(loc, "endInvocationInterlockARB() must only be called once", "", "");
1735
if (beginInvocationInterlockCount == 0)
1736
error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", "");
1737
1738
endInvocationInterlockCount++;
1739
break;
1740
default:
1741
break;
1742
}
1743
}
1744
1745
// Finish processing object.length(). This started earlier in handleDotDereference(), where
1746
// the ".length" part was recognized and semantically checked, and finished here where the
1747
// function syntax "()" is recognized.
1748
//
1749
// Return resulting tree node.
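// Illustrative examples (GLSL):
//   buffer B { float data[]; } b;   b.data.length()  -> EOpArrayLength node, resolved at run time
//   float a[4];                     a.length()       -> constant 4
//   mat3 m;                         m.length()       -> constant 3 (number of columns)
//   vec2 v;                         v.length()       -> constant 2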
1750
TIntermTyped* TParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode)
1751
{
1752
int length = 0;
1753
1754
if (function->getParamCount() > 0)
1755
error(loc, "method does not accept any arguments", function->getName().c_str(), "");
1756
else {
1757
const TType& type = intermNode->getAsTyped()->getType();
1758
if (type.isArray()) {
1759
if (type.isUnsizedArray()) {
1760
if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) {
1761
// We could be between a layout declaration that gives a built-in io array implicit size and
1762
// a user redeclaration of that array, meaning we have to substitute its implicit size here
1763
// without actually redeclaring the array. (It is an error to use a member before the
1764
// redeclaration, but not an error to use the array name itself.)
1765
const TString& name = intermNode->getAsSymbolNode()->getName();
1766
if (name == "gl_in" || name == "gl_out" || name == "gl_MeshVerticesNV" ||
1767
name == "gl_MeshPrimitivesNV") {
1768
length = getIoArrayImplicitSize(type.getQualifier());
1769
}
1770
} else if (const auto typed = intermNode->getAsTyped()) {
1771
if (typed->getQualifier().builtIn == EbvSampleMask) {
1772
requireProfile(loc, EEsProfile, "the array size of gl_SampleMask and gl_SampleMaskIn is ceil(gl_MaxSamples/32)");
1773
length = (resources.maxSamples + 31) / 32;
1774
}
1775
}
1776
if (length == 0) {
1777
if (intermNode->getAsSymbolNode() && isIoResizeArray(type))
1778
error(loc, "", function->getName().c_str(), "array must first be sized by a redeclaration or layout qualifier");
1779
else if (isRuntimeLength(*intermNode->getAsTyped())) {
1780
// Create a unary op and let the back end handle it
1781
return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
1782
} else
1783
error(loc, "", function->getName().c_str(), "array must be declared with a size before using this method");
1784
}
1785
} else if (type.getOuterArrayNode()) {
1786
// If the array's outer size is specified by an intermediate node, it means the array's length
1787
// was specified by a specialization constant. In such a case, we should return the node of the
1788
// specialization constants to represent the length.
1789
return type.getOuterArrayNode();
1790
} else
1791
length = type.getOuterArraySize();
1792
} else if (type.isMatrix())
1793
length = type.getMatrixCols();
1794
else if (type.isVector())
1795
length = type.getVectorSize();
1796
else if (type.isCoopMat())
1797
return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
1798
else {
1799
// we should not get here, because earlier semantic checking should have prevented this path
1800
error(loc, ".length()", "unexpected use of .length()", "");
1801
}
1802
}
1803
1804
if (length == 0)
1805
length = 1;
1806
1807
return intermediate.addConstantUnion(length, loc);
1808
}
1809
1810
//
1811
// Add any needed implicit conversions for function-call arguments to input parameters.
1812
//
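// For example (illustrative), calling 'void f(float x)' as 'f(1)' wraps the argument in an
// int-to-float conversion node produced by addConversion().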
1813
void TParseContext::addInputArgumentConversions(const TFunction& function, TIntermNode*& arguments) const
1814
{
1815
TIntermAggregate* aggregate = arguments->getAsAggregate();
1816
1817
// Process each argument's conversion
1818
for (int i = 0; i < function.getParamCount(); ++i) {
1819
// At this early point there is a slight ambiguity between whether an aggregate 'arguments'
1820
// is the single argument itself or its children are the arguments. Only one argument
1821
// means take 'arguments' itself as the one argument.
1822
TIntermTyped* arg = function.getParamCount() == 1 ? arguments->getAsTyped() : (aggregate ? aggregate->getSequence()[i]->getAsTyped() : arguments->getAsTyped());
1823
if (*function[i].type != arg->getType()) {
1824
if (function[i].type->getQualifier().isParamInput() &&
1825
!function[i].type->isCoopMat()) {
1826
// In-qualified arguments just need an extra node added above the argument to
1827
// convert to the correct type.
1828
arg = intermediate.addConversion(EOpFunctionCall, *function[i].type, arg);
1829
if (arg) {
1830
if (function.getParamCount() == 1)
1831
arguments = arg;
1832
else {
1833
if (aggregate)
1834
aggregate->getSequence()[i] = arg;
1835
else
1836
arguments = arg;
1837
}
1838
}
1839
}
1840
}
1841
}
1842
}
1843
1844
//
1845
// Add any needed implicit output conversions for function-call arguments. This
1846
// can require a new tree topology, complicated further by whether the function
1847
// has a return value.
1848
//
1849
// Returns a node of a subtree that evaluates to the return value of the function.
1850
//
1851
TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& function, TIntermAggregate& intermNode) const
1852
{
1853
TIntermSequence& arguments = intermNode.getSequence();
1854
1855
// Will there be any output conversions?
1856
bool outputConversions = false;
1857
for (int i = 0; i < function.getParamCount(); ++i) {
1858
if (*function[i].type != arguments[i]->getAsTyped()->getType() && function[i].type->getQualifier().isParamOutput()) {
1859
outputConversions = true;
1860
break;
1861
}
1862
}
1863
1864
if (! outputConversions)
1865
return &intermNode;
1866
1867
// Setup for the new tree, if needed:
1868
//
1869
// Output conversions need a different tree topology.
1870
// Out-qualified arguments need a temporary of the correct type, with the call
1871
// followed by an assignment of the temporary to the original argument:
1872
// void: function(arg, ...) -> ( function(tempArg, ...), arg = tempArg, ...)
1873
// ret = function(arg, ...) -> ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet)
1874
// Where the "tempArg" type needs no conversion as an argument, but will convert on assignment.
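// For example (illustrative GLSL): given 'void f(out int x)' and 'float a;', the call 'f(a)'
// becomes roughly '(f(tempArg), a = tempArg)', with the implicit int-to-float conversion
// happening on the assignment back to 'a'.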
1875
TIntermTyped* conversionTree = nullptr;
1876
TVariable* tempRet = nullptr;
1877
if (intermNode.getBasicType() != EbtVoid) {
1878
// do the "tempRet = function(...), " bit from above
1879
tempRet = makeInternalVariable("tempReturn", intermNode.getType());
1880
TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc());
1881
conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, intermNode.getLoc());
1882
} else
1883
conversionTree = &intermNode;
1884
1885
conversionTree = intermediate.makeAggregate(conversionTree);
1886
1887
// Process each argument's conversion
1888
for (int i = 0; i < function.getParamCount(); ++i) {
1889
if (*function[i].type != arguments[i]->getAsTyped()->getType()) {
1890
if (function[i].type->getQualifier().isParamOutput()) {
1891
// Out-qualified arguments need to use the topology set up above.
1892
// do the " ...(tempArg, ...), arg = tempArg" bit from above
1893
TType paramType;
1894
paramType.shallowCopy(*function[i].type);
1895
if (arguments[i]->getAsTyped()->getType().isParameterized() &&
1896
!paramType.isParameterized()) {
1897
paramType.shallowCopy(arguments[i]->getAsTyped()->getType());
1898
paramType.copyTypeParameters(*arguments[i]->getAsTyped()->getType().getTypeParameters());
1899
}
1900
TVariable* tempArg = makeInternalVariable("tempArg", paramType);
1901
tempArg->getWritableType().getQualifier().makeTemporary();
1902
TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, intermNode.getLoc());
1903
TIntermTyped* tempAssign = intermediate.addAssign(EOpAssign, arguments[i]->getAsTyped(), tempArgNode, arguments[i]->getLoc());
1904
conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc());
1905
// replace the argument with another node for the same tempArg variable
1906
arguments[i] = intermediate.addSymbol(*tempArg, intermNode.getLoc());
1907
}
1908
}
1909
}
1910
1911
// Finalize the tree topology (see bigger comment above).
1912
if (tempRet) {
1913
// do the "..., tempRet" bit from above
1914
TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc());
1915
conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, intermNode.getLoc());
1916
}
1917
conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), intermNode.getLoc());
1918
1919
return conversionTree;
1920
}
1921
1922
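// Wrap intermediate.addAssign() with the extension checks needed for special assignment forms:
// '+=' / '-=' on a buffer reference, and sampler-to-sampler assignment for bindless textures.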
TIntermTyped* TParseContext::addAssign(const TSourceLoc& loc, TOperator op, TIntermTyped* left, TIntermTyped* right)
1923
{
1924
if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference())
1925
requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "+= and -= on a buffer reference");
1926
1927
if (op == EOpAssign && left->getBasicType() == EbtSampler && right->getBasicType() == EbtSampler)
1928
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "sampler assignment for bindless texture");
1929
1930
return intermediate.addAssign(op, left, right, loc);
1931
}
1932
1933
void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode)
1934
{
1935
const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence();
1936
1937
//const int gl_SemanticsRelaxed = 0x0;
1938
const int gl_SemanticsAcquire = 0x2;
1939
const int gl_SemanticsRelease = 0x4;
1940
const int gl_SemanticsAcquireRelease = 0x8;
1941
const int gl_SemanticsMakeAvailable = 0x2000;
1942
const int gl_SemanticsMakeVisible = 0x4000;
1943
const int gl_SemanticsVolatile = 0x8000;
1944
1945
//const int gl_StorageSemanticsNone = 0x0;
1946
const int gl_StorageSemanticsBuffer = 0x40;
1947
const int gl_StorageSemanticsShared = 0x100;
1948
const int gl_StorageSemanticsImage = 0x800;
1949
const int gl_StorageSemanticsOutput = 0x1000;
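// Illustrative call shapes from GL_KHR_memory_scope_semantics, matching the argument indices
// used below:
//   atomicAdd(mem, data, scope, storageSemantics, semantics)          -> (*argp)[3], [4]
//   atomicLoad(mem, scope, storageSemantics, semantics)               -> (*argp)[2], [3]
//   atomicCompSwap(mem, compare, data, scope, storageSemanticsEqual,
//                  semEqual, storageSemanticsUnequal, semUnequal)     -> (*argp)[4]..[7]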
1950
1951
1952
unsigned int semantics = 0, storageClassSemantics = 0;
1953
unsigned int semantics2 = 0, storageClassSemantics2 = 0;
1954
1955
const TIntermTyped* arg0 = (*argp)[0]->getAsTyped();
1956
const bool isMS = arg0->getBasicType() == EbtSampler && arg0->getType().getSampler().isMultiSample();
1957
1958
// Grab the semantics and storage class semantics from the operands, based on opcode
1959
switch (callNode.getOp()) {
1960
case EOpAtomicAdd:
1961
case EOpAtomicSubtract:
1962
case EOpAtomicMin:
1963
case EOpAtomicMax:
1964
case EOpAtomicAnd:
1965
case EOpAtomicOr:
1966
case EOpAtomicXor:
1967
case EOpAtomicExchange:
1968
case EOpAtomicStore:
1969
storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
1970
semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
1971
break;
1972
case EOpAtomicLoad:
1973
storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
1974
semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
1975
break;
1976
case EOpAtomicCompSwap:
1977
storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
1978
semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
1979
storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
1980
semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
1981
break;
1982
1983
case EOpImageAtomicAdd:
1984
case EOpImageAtomicMin:
1985
case EOpImageAtomicMax:
1986
case EOpImageAtomicAnd:
1987
case EOpImageAtomicOr:
1988
case EOpImageAtomicXor:
1989
case EOpImageAtomicExchange:
1990
case EOpImageAtomicStore:
1991
storageClassSemantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst();
1992
semantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst();
1993
break;
1994
case EOpImageAtomicLoad:
1995
storageClassSemantics = (*argp)[isMS ? 4 : 3]->getAsConstantUnion()->getConstArray()[0].getIConst();
1996
semantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst();
1997
break;
1998
case EOpImageAtomicCompSwap:
1999
storageClassSemantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst();
2000
semantics = (*argp)[isMS ? 7 : 6]->getAsConstantUnion()->getConstArray()[0].getIConst();
2001
storageClassSemantics2 = (*argp)[isMS ? 8 : 7]->getAsConstantUnion()->getConstArray()[0].getIConst();
2002
semantics2 = (*argp)[isMS ? 9 : 8]->getAsConstantUnion()->getConstArray()[0].getIConst();
2003
break;
2004
2005
case EOpBarrier:
2006
storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
2007
semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
2008
break;
2009
case EOpMemoryBarrier:
2010
storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
2011
semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
2012
break;
2013
default:
2014
break;
2015
}
2016
2017
if ((semantics & gl_SemanticsAcquire) &&
2018
(callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) {
2019
error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store",
2020
fnCandidate.getName().c_str(), "");
2021
}
2022
if ((semantics & gl_SemanticsRelease) &&
2023
(callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
2024
error(loc, "gl_SemanticsRelease must not be used with (image) atomic load",
2025
fnCandidate.getName().c_str(), "");
2026
}
2027
if ((semantics & gl_SemanticsAcquireRelease) &&
2028
(callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore ||
2029
callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
2030
error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store",
2031
fnCandidate.getName().c_str(), "");
2032
}
2033
if (((semantics | semantics2) & ~(gl_SemanticsAcquire |
2034
gl_SemanticsRelease |
2035
gl_SemanticsAcquireRelease |
2036
gl_SemanticsMakeAvailable |
2037
gl_SemanticsMakeVisible |
2038
gl_SemanticsVolatile))) {
2039
error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), "");
2040
}
2041
if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer |
2042
gl_StorageSemanticsShared |
2043
gl_StorageSemanticsImage |
2044
gl_StorageSemanticsOutput))) {
2045
error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), "");
2046
}
2047
2048
if (callNode.getOp() == EOpMemoryBarrier) {
2049
if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
2050
error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or "
2051
"gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
2052
}
2053
} else {
2054
if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
2055
if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
2056
error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
2057
"gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
2058
}
2059
}
2060
if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
2061
if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
2062
error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
2063
"gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
2064
}
2065
}
2066
}
2067
if (callNode.getOp() == EOpMemoryBarrier) {
2068
if (storageClassSemantics == 0) {
2069
error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
2070
}
2071
}
2072
if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) {
2073
error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
2074
}
2075
if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) &&
2076
(semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
2077
error(loc, "semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease",
2078
fnCandidate.getName().c_str(), "");
2079
}
2080
if ((semantics & gl_SemanticsMakeAvailable) &&
2081
!(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
2082
error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease",
2083
fnCandidate.getName().c_str(), "");
2084
}
2085
if ((semantics & gl_SemanticsMakeVisible) &&
2086
!(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) {
2087
error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease",
2088
fnCandidate.getName().c_str(), "");
2089
}
2090
if ((semantics & gl_SemanticsVolatile) &&
2091
(callNode.getOp() == EOpMemoryBarrier || callNode.getOp() == EOpBarrier)) {
2092
error(loc, "gl_SemanticsVolatile must not be used with memoryBarrier or controlBarrier",
2093
fnCandidate.getName().c_str(), "");
2094
}
2095
if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) &&
2096
((semantics ^ semantics2) & gl_SemanticsVolatile)) {
2097
error(loc, "semEqual and semUnequal must either both include gl_SemanticsVolatile or neither",
2098
fnCandidate.getName().c_str(), "");
2099
}
2100
}
2101
2102
//
2103
// Do additional checking of built-in function calls that is not caught
2104
// by normal semantic checks on argument type, extension tagging, etc.
2105
//
2106
// Assumes there has been a semantically correct match to a built-in function prototype.
2107
//
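// For example, a non-constant 'comp' argument to textureGather*(), or a constant texel offset
// outside [gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset], is diagnosed here (illustrative,
// not an exhaustive list of the checks below).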
2108
void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode)
2109
{
2110
// Set up convenience accessors to the argument(s). There is almost always
2111
// multiple arguments for the cases below, but when there might be one,
2112
// check the unaryArg first.
2113
const TIntermSequence* argp = nullptr; // confusing to use [] syntax on a pointer, so this is to help get a reference
2114
const TIntermTyped* unaryArg = nullptr;
2115
const TIntermTyped* arg0 = nullptr;
2116
if (callNode.getAsAggregate()) {
2117
argp = &callNode.getAsAggregate()->getSequence();
2118
if (argp->size() > 0)
2119
arg0 = (*argp)[0]->getAsTyped();
2120
} else {
2121
assert(callNode.getAsUnaryNode());
2122
unaryArg = callNode.getAsUnaryNode()->getOperand();
2123
arg0 = unaryArg;
2124
}
2125
2126
TString featureString;
2127
const char* feature = nullptr;
2128
switch (callNode.getOp()) {
2129
case EOpTextureGather:
2130
case EOpTextureGatherOffset:
2131
case EOpTextureGatherOffsets:
2132
{
2133
// Figure out which variants are allowed by what extensions,
2134
// and what arguments must be constant for which situations.
2135
2136
featureString = fnCandidate.getName();
2137
featureString += "(...)";
2138
feature = featureString.c_str();
2139
profileRequires(loc, EEsProfile, 310, nullptr, feature);
2140
int compArg = -1; // track which argument, if any, is the constant component argument
2141
switch (callNode.getOp()) {
2142
case EOpTextureGather:
2143
// More than two arguments, or a rectangular or shadow sampler, needs gpu_shader5;
2144
// otherwise, GL_ARB_texture_gather is sufficient.
2145
if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) {
2146
profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
2147
if (! fnCandidate[0].type->getSampler().shadow)
2148
compArg = 2;
2149
} else
2150
profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
2151
break;
2152
case EOpTextureGatherOffset:
2153
// GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
2154
if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3)
2155
profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
2156
else
2157
profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
2158
if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion())
2159
profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
2160
"non-constant offset argument");
2161
if (! fnCandidate[0].type->getSampler().shadow)
2162
compArg = 3;
2163
break;
2164
case EOpTextureGatherOffsets:
2165
profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
2166
if (! fnCandidate[0].type->getSampler().shadow)
2167
compArg = 3;
2168
// check for constant offsets
2169
if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion())
2170
error(loc, "must be a compile-time constant:", feature, "offsets argument");
2171
break;
2172
default:
2173
break;
2174
}
2175
2176
if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
2177
if ((*argp)[compArg]->getAsConstantUnion()) {
2178
int value = (*argp)[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
2179
if (value < 0 || value > 3)
2180
error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
2181
} else
2182
error(loc, "must be a compile-time constant:", feature, "component argument");
2183
}
2184
2185
bool bias = false;
2186
if (callNode.getOp() == EOpTextureGather)
2187
bias = fnCandidate.getParamCount() > 3;
2188
else if (callNode.getOp() == EOpTextureGatherOffset ||
2189
callNode.getOp() == EOpTextureGatherOffsets)
2190
bias = fnCandidate.getParamCount() > 4;
2191
2192
if (bias) {
2193
featureString = fnCandidate.getName();
2194
featureString += "with bias argument";
2195
feature = featureString.c_str();
2196
profileRequires(loc, ~EEsProfile, 450, nullptr, feature);
2197
requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature);
2198
}
2199
break;
2200
}
2201
2202
case EOpTexture:
2203
case EOpTextureLod:
2204
{
2205
if ((fnCandidate.getParamCount() > 2) && ((*argp)[1]->getAsTyped()->getType().getBasicType() == EbtFloat) &&
2206
((*argp)[1]->getAsTyped()->getType().getVectorSize() == 4) && fnCandidate[0].type->getSampler().shadow) {
2207
featureString = fnCandidate.getName();
2208
if (callNode.getOp() == EOpTexture)
2209
featureString += "(..., float bias)";
2210
else
2211
featureString += "(..., float lod)";
2212
feature = featureString.c_str();
2213
2214
if ((fnCandidate[0].type->getSampler().dim == Esd2D && fnCandidate[0].type->getSampler().arrayed) || //2D Array Shadow
2215
(fnCandidate[0].type->getSampler().dim == EsdCube && fnCandidate[0].type->getSampler().arrayed && fnCandidate.getParamCount() > 3) || // Cube Array Shadow
2216
(fnCandidate[0].type->getSampler().dim == EsdCube && callNode.getOp() == EOpTextureLod)) { // Cube Shadow
2217
requireExtensions(loc, 1, &E_GL_EXT_texture_shadow_lod, feature);
2218
if (isEsProfile()) {
2219
if (version < 320 &&
2220
!extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
2221
error(loc, "GL_EXT_texture_shadow_lod not supported for this ES version", feature, "");
2222
else
2223
profileRequires(loc, EEsProfile, 320, nullptr, feature);
2224
} else { // Desktop
2225
profileRequires(loc, ~EEsProfile, 130, nullptr, feature);
2226
}
2227
}
2228
}
2229
break;
2230
}
2231
2232
case EOpSparseTextureGather:
2233
case EOpSparseTextureGatherOffset:
2234
case EOpSparseTextureGatherOffsets:
2235
{
2236
bool bias = false;
2237
if (callNode.getOp() == EOpSparseTextureGather)
2238
bias = fnCandidate.getParamCount() > 4;
2239
else if (callNode.getOp() == EOpSparseTextureGatherOffset ||
2240
callNode.getOp() == EOpSparseTextureGatherOffsets)
2241
bias = fnCandidate.getParamCount() > 5;
2242
2243
if (bias) {
2244
featureString = fnCandidate.getName();
2245
featureString += "with bias argument";
2246
feature = featureString.c_str();
2247
profileRequires(loc, ~EEsProfile, 450, nullptr, feature);
2248
requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature);
2249
}
2250
// Per the GL_ARB_sparse_texture2 extension, the "offsets" parameter must be a constant integral
2251
// expression for sparseTextureGatherOffsetsARB, just as for textureGatherOffsets.
2252
if (callNode.getOp() == EOpSparseTextureGatherOffsets) {
2253
int offsetsArg = arg0->getType().getSampler().shadow ? 3 : 2;
2254
if (!(*argp)[offsetsArg]->getAsConstantUnion())
2255
error(loc, "argument must be compile-time constant", "offsets", "");
2256
}
2257
break;
2258
}
2259
2260
case EOpSparseTextureGatherLod:
2261
case EOpSparseTextureGatherLodOffset:
2262
case EOpSparseTextureGatherLodOffsets:
2263
{
2264
requireExtensions(loc, 1, &E_GL_ARB_sparse_texture2, fnCandidate.getName().c_str());
2265
break;
2266
}
2267
2268
case EOpSwizzleInvocations:
2269
{
2270
if (! (*argp)[1]->getAsConstantUnion())
2271
error(loc, "argument must be compile-time constant", "offset", "");
2272
else {
2273
unsigned offset[4] = {};
2274
offset[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
2275
offset[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst();
2276
offset[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst();
2277
offset[3] = (*argp)[1]->getAsConstantUnion()->getConstArray()[3].getUConst();
2278
if (offset[0] > 3 || offset[1] > 3 || offset[2] > 3 || offset[3] > 3)
2279
error(loc, "components must be in the range [0, 3]", "offset", "");
2280
}
2281
2282
break;
2283
}
2284
2285
case EOpSwizzleInvocationsMasked:
2286
{
2287
if (! (*argp)[1]->getAsConstantUnion())
2288
error(loc, "argument must be compile-time constant", "mask", "");
2289
else {
2290
unsigned mask[3] = {};
2291
mask[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
2292
mask[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst();
2293
mask[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst();
2294
if (mask[0] > 31 || mask[1] > 31 || mask[2] > 31)
2295
error(loc, "components must be in the range [0, 31]", "mask", "");
2296
}
2297
2298
break;
2299
}
2300
2301
case EOpTextureOffset:
2302
case EOpTextureFetchOffset:
2303
case EOpTextureProjOffset:
2304
case EOpTextureLodOffset:
2305
case EOpTextureProjLodOffset:
2306
case EOpTextureGradOffset:
2307
case EOpTextureProjGradOffset:
2308
{
2309
// Handle texture-offset limits checking
2310
// Pick which argument has to hold constant offsets
2311
int arg = -1;
2312
switch (callNode.getOp()) {
2313
case EOpTextureOffset: arg = 2; break;
2314
case EOpTextureFetchOffset: arg = (arg0->getType().getSampler().isRect()) ? 2 : 3; break;
2315
case EOpTextureProjOffset: arg = 2; break;
2316
case EOpTextureLodOffset: arg = 3; break;
2317
case EOpTextureProjLodOffset: arg = 3; break;
2318
case EOpTextureGradOffset: arg = 4; break;
2319
case EOpTextureProjGradOffset: arg = 4; break;
2320
default:
2321
assert(0);
2322
break;
2323
}
2324
2325
if (arg > 0) {
2326
2327
bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 &&
2328
arg0->getType().getSampler().shadow;
2329
if (f16ShadowCompare)
2330
++arg;
2331
if (! (*argp)[arg]->getAsTyped()->getQualifier().isConstant())
2332
error(loc, "argument must be compile-time constant", "texel offset", "");
2333
else if ((*argp)[arg]->getAsConstantUnion()) {
2334
const TType& type = (*argp)[arg]->getAsTyped()->getType();
2335
for (int c = 0; c < type.getVectorSize(); ++c) {
2336
int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
2337
if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
2338
error(loc, "value is out of range:", "texel offset",
2339
"[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
2340
}
2341
}
2342
2343
if (callNode.getOp() == EOpTextureOffset) {
2344
TSampler s = arg0->getType().getSampler();
2345
if (s.is2D() && s.isArrayed() && s.isShadow()) {
2346
if (
2347
((*argp)[1]->getAsTyped()->getType().getBasicType() == EbtFloat) &&
2348
((*argp)[1]->getAsTyped()->getType().getVectorSize() == 4) &&
2349
(fnCandidate.getParamCount() == 4)) {
2350
featureString = fnCandidate.getName() + " for sampler2DArrayShadow";
2351
feature = featureString.c_str();
2352
requireExtensions(loc, 1, &E_GL_EXT_texture_shadow_lod, feature);
2353
profileRequires(loc, EEsProfile, 300, nullptr, feature);
2354
profileRequires(loc, ~EEsProfile, 130, nullptr, feature);
2355
}
2356
else if (isEsProfile())
2357
error(loc, "TextureOffset does not support sampler2DArrayShadow : ", "sampler", "ES Profile");
2358
else if (version <= 420)
2359
error(loc, "TextureOffset does not support sampler2DArrayShadow : ", "sampler", "version <= 420");
2360
}
2361
}
2362
2363
if (callNode.getOp() == EOpTextureLodOffset) {
2364
TSampler s = arg0->getType().getSampler();
2365
if (s.is2D() && s.isArrayed() && s.isShadow() &&
2366
((*argp)[1]->getAsTyped()->getType().getBasicType() == EbtFloat) &&
2367
((*argp)[1]->getAsTyped()->getType().getVectorSize() == 4) &&
2368
(fnCandidate.getParamCount() == 4)) {
2369
featureString = fnCandidate.getName() + " for sampler2DArrayShadow";
2370
feature = featureString.c_str();
2371
profileRequires(loc, EEsProfile, 300, nullptr, feature);
2372
profileRequires(loc, ~EEsProfile, 130, nullptr, feature);
2373
requireExtensions(loc, 1, &E_GL_EXT_texture_shadow_lod, feature);
2374
}
2375
}
2376
}
2377
2378
break;
2379
}
2380
2381
case EOpTraceNV:
2382
if (!(*argp)[10]->getAsConstantUnion())
2383
error(loc, "argument must be compile-time constant", "payload number", "a");
2384
break;
2385
case EOpTraceRayMotionNV:
2386
if (!(*argp)[11]->getAsConstantUnion())
2387
error(loc, "argument must be compile-time constant", "payload number", "a");
2388
break;
2389
case EOpTraceKHR:
2390
if (!(*argp)[10]->getAsConstantUnion())
2391
error(loc, "argument must be compile-time constant", "payload number", "a");
2392
else {
2393
unsigned int location = (*argp)[10]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2394
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(0, location) < 0)
2395
error(loc, "with layout(location =", "no rayPayloadEXT/rayPayloadInEXT declared", "%d)", location);
2396
}
2397
break;
2398
case EOpExecuteCallableNV:
2399
if (!(*argp)[1]->getAsConstantUnion())
2400
error(loc, "argument must be compile-time constant", "callable data number", "");
2401
break;
2402
case EOpExecuteCallableKHR:
2403
if (!(*argp)[1]->getAsConstantUnion())
2404
error(loc, "argument must be compile-time constant", "callable data number", "");
2405
else {
2406
unsigned int location = (*argp)[1]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2407
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(1, location) < 0)
2408
error(loc, "with layout(location =", "no callableDataEXT/callableDataInEXT declared", "%d)", location);
2409
}
2410
break;
2411
2412
case EOpHitObjectTraceRayNV:
2413
if (!(*argp)[11]->getAsConstantUnion())
2414
error(loc, "argument must be compile-time constant", "payload number", "");
2415
else {
2416
unsigned int location = (*argp)[11]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2417
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(0, location) < 0)
2418
error(loc, "with layout(location =", "no rayPayloadEXT/rayPayloadInEXT declared", "%d)", location);
2419
}
2420
break;
2421
case EOpHitObjectTraceRayMotionNV:
2422
if (!(*argp)[12]->getAsConstantUnion())
2423
error(loc, "argument must be compile-time constant", "payload number", "");
2424
else {
2425
unsigned int location = (*argp)[12]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2426
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(0, location) < 0)
2427
error(loc, "with layout(location =", "no rayPayloadEXT/rayPayloadInEXT declared", "%d)", location);
2428
}
2429
break;
2430
case EOpHitObjectExecuteShaderNV:
2431
if (!(*argp)[1]->getAsConstantUnion())
2432
error(loc, "argument must be compile-time constant", "payload number", "");
2433
else {
2434
unsigned int location = (*argp)[1]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2435
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(0, location) < 0)
2436
error(loc, "with layout(location =", "no rayPayloadEXT/rayPayloadInEXT declared", "%d)", location);
2437
}
2438
break;
2439
case EOpHitObjectRecordHitNV:
2440
if (!(*argp)[12]->getAsConstantUnion())
2441
error(loc, "argument must be compile-time constant", "hitobjectattribute number", "");
2442
else {
2443
unsigned int location = (*argp)[12]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2444
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(2, location) < 0)
2445
error(loc, "with layout(location =", "no hitObjectAttributeNV declared", "%d)", location);
2446
}
2447
break;
2448
case EOpHitObjectRecordHitMotionNV:
2449
if (!(*argp)[13]->getAsConstantUnion())
2450
error(loc, "argument must be compile-time constant", "hitobjectattribute number", "");
2451
else {
2452
unsigned int location = (*argp)[13]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2453
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(2, location) < 0)
2454
error(loc, "with layout(location =", "no hitObjectAttributeNV declared", "%d)", location);
2455
}
2456
break;
2457
case EOpHitObjectRecordHitWithIndexNV:
2458
if (!(*argp)[11]->getAsConstantUnion())
2459
error(loc, "argument must be compile-time constant", "hitobjectattribute number", "");
2460
else {
2461
unsigned int location = (*argp)[11]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2462
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(2, location) < 0)
2463
error(loc, "with layout(location =", "no hitObjectAttributeNV declared", "%d)", location);
2464
}
2465
break;
2466
case EOpHitObjectRecordHitWithIndexMotionNV:
2467
if (!(*argp)[12]->getAsConstantUnion())
2468
error(loc, "argument must be compile-time constant", "hitobjectattribute number", "");
2469
else {
2470
unsigned int location = (*argp)[12]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2471
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(2, location) < 0)
2472
error(loc, "with layout(location =", "no hitObjectAttributeNV declared", "%d)", location);
2473
}
2474
break;
2475
case EOpHitObjectGetAttributesNV:
2476
if (!(*argp)[1]->getAsConstantUnion())
2477
error(loc, "argument must be compile-time constant", "hitobjectattribute number", "");
2478
else {
2479
unsigned int location = (*argp)[1]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
2480
if (!extensionTurnedOn(E_GL_EXT_spirv_intrinsics) && intermediate.checkLocationRT(2, location) < 0)
2481
error(loc, "with layout(location =", "no hitObjectAttributeNV declared", "%d)", location);
2482
}
2483
break;
2484
2485
case EOpRayQueryGetIntersectionType:
2486
case EOpRayQueryGetIntersectionT:
2487
case EOpRayQueryGetIntersectionInstanceCustomIndex:
2488
case EOpRayQueryGetIntersectionInstanceId:
2489
case EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset:
2490
case EOpRayQueryGetIntersectionGeometryIndex:
2491
case EOpRayQueryGetIntersectionPrimitiveIndex:
2492
case EOpRayQueryGetIntersectionBarycentrics:
2493
case EOpRayQueryGetIntersectionFrontFace:
2494
case EOpRayQueryGetIntersectionObjectRayDirection:
2495
case EOpRayQueryGetIntersectionObjectRayOrigin:
2496
case EOpRayQueryGetIntersectionObjectToWorld:
2497
case EOpRayQueryGetIntersectionWorldToObject:
2498
case EOpRayQueryGetIntersectionTriangleVertexPositionsEXT:
2499
if (!(*argp)[1]->getAsConstantUnion())
2500
error(loc, "argument must be compile-time constant", "committed", "");
2501
break;
2502
2503
case EOpTextureQuerySamples:
2504
case EOpImageQuerySamples:
2505
// GL_ARB_shader_texture_image_samples
2506
profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples");
2507
break;
2508
2509
case EOpImageAtomicAdd:
2510
case EOpImageAtomicMin:
2511
case EOpImageAtomicMax:
2512
case EOpImageAtomicAnd:
2513
case EOpImageAtomicOr:
2514
case EOpImageAtomicXor:
2515
case EOpImageAtomicExchange:
2516
case EOpImageAtomicCompSwap:
2517
case EOpImageAtomicLoad:
2518
case EOpImageAtomicStore:
2519
{
2520
// Make sure the image types have the correct layout() format and correct argument types
2521
const TType& imageType = arg0->getType();
2522
if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint ||
2523
imageType.getSampler().type == EbtInt64 || imageType.getSampler().type == EbtUint64) {
2524
if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui &&
2525
imageType.getQualifier().getFormat() != ElfR64i && imageType.getQualifier().getFormat() != ElfR64ui)
2526
error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
2527
if (callNode.getType().getBasicType() == EbtInt64 && imageType.getQualifier().getFormat() != ElfR64i)
2528
error(loc, "only supported on image with format r64i", fnCandidate.getName().c_str(), "");
2529
else if (callNode.getType().getBasicType() == EbtUint64 && imageType.getQualifier().getFormat() != ElfR64ui)
2530
error(loc, "only supported on image with format r64ui", fnCandidate.getName().c_str(), "");
2531
} else if (callNode.getType().getBasicType() == EbtFloat16 &&
2532
((callNode.getType().getVectorSize() == 2 && arg0->getType().getQualifier().getFormat() == ElfRg16f) ||
2533
(callNode.getType().getVectorSize() == 4 && arg0->getType().getQualifier().getFormat() == ElfRgba16f))) {
2534
if (StartsWith(fnCandidate.getName(), "imageAtomicAdd") ||
2535
StartsWith(fnCandidate.getName(), "imageAtomicExchange") ||
2536
StartsWith(fnCandidate.getName(), "imageAtomicMin") ||
2537
StartsWith(fnCandidate.getName(), "imageAtomicMax")) {
2538
requireExtensions(loc, 1, &E_GL_NV_shader_atomic_fp16_vector, fnCandidate.getName().c_str());
2539
} else {
2540
error(loc, "f16vec2/4 operation not supported on: ", fnCandidate.getName().c_str(), "");
2541
}
2542
} else if (imageType.getSampler().type == EbtFloat) {
2543
if (StartsWith(fnCandidate.getName(), "imageAtomicExchange")) {
2544
// imageAtomicExchange doesn't require an extension
2545
} else if (StartsWith(fnCandidate.getName(), "imageAtomicAdd") ||
2546
StartsWith(fnCandidate.getName(), "imageAtomicLoad") ||
2547
StartsWith(fnCandidate.getName(), "imageAtomicStore")) {
2548
requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str());
2549
} else if (StartsWith(fnCandidate.getName(), "imageAtomicMin") ||
2550
StartsWith(fnCandidate.getName(), "imageAtomicMax")) {
2551
requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float2, fnCandidate.getName().c_str());
2552
} else {
2553
error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
2554
}
2555
if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile())
2556
error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
2557
} else {
2558
error(loc, "not supported on this image type", fnCandidate.getName().c_str(), "");
2559
}
2560
2561
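        // Multisample image atomics take an extra sample operand, so allow one more
        // argument before treating the surplus as memory scope/semantics operands.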
        const size_t maxArgs = imageType.getSampler().isMultiSample() ? 5 : 4;
        if (argp->size() > maxArgs) {
            requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
            memorySemanticsCheck(loc, fnCandidate, callNode);
        }

        break;
    }

    case EOpAtomicAdd:
    case EOpAtomicSubtract:
    case EOpAtomicMin:
    case EOpAtomicMax:
    case EOpAtomicAnd:
    case EOpAtomicOr:
    case EOpAtomicXor:
    case EOpAtomicExchange:
    case EOpAtomicCompSwap:
    case EOpAtomicLoad:
    case EOpAtomicStore:
    {
        if (argp->size() > 3) {
            requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
            memorySemanticsCheck(loc, fnCandidate, callNode);
            if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange ||
                 callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpAtomicStore) &&
                (arg0->getType().getBasicType() == EbtFloat ||
                 arg0->getType().getBasicType() == EbtDouble)) {
                requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str());
            } else if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange ||
                        callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpAtomicStore ||
                        callNode.getOp() == EOpAtomicMin || callNode.getOp() == EOpAtomicMax) &&
                       arg0->getType().isFloatingDomain()) {
                requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float2, fnCandidate.getName().c_str());
            }
        } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) {
            const char* const extensions[2] = { E_GL_NV_shader_atomic_int64,
                                                E_GL_EXT_shader_atomic_int64 };
            requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str());
        } else if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange ||
                    callNode.getOp() == EOpAtomicMin || callNode.getOp() == EOpAtomicMax) &&
                   arg0->getType().getBasicType() == EbtFloat16 &&
                   (arg0->getType().getVectorSize() == 2 || arg0->getType().getVectorSize() == 4 )) {
            requireExtensions(loc, 1, &E_GL_NV_shader_atomic_fp16_vector, fnCandidate.getName().c_str());
        } else if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange) &&
                   (arg0->getType().getBasicType() == EbtFloat ||
                    arg0->getType().getBasicType() == EbtDouble)) {
            requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str());
        } else if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange ||
                    callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpAtomicStore ||
                    callNode.getOp() == EOpAtomicMin || callNode.getOp() == EOpAtomicMax) &&
                   arg0->getType().isFloatingDomain()) {
            requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float2, fnCandidate.getName().c_str());
        }

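        // Trace the l-value back to its base; atomics are only allowed on l-values whose
        // storage is shared, buffer-block, or taskPayloadSharedEXT.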
        const TIntermTyped* base = TIntermediate::traverseLValueBase(arg0, true, true);
        const char* errMsg = "Only l-values corresponding to shader block storage or shared variables can be used with "
                             "atomic memory functions.";
        if (base) {
            const TType* refType = (base->getType().isReference()) ? base->getType().getReferentType() : nullptr;
            const TQualifier& qualifier =
                (refType != nullptr) ? refType->getQualifier() : base->getType().getQualifier();
            if (qualifier.storage != EvqShared && qualifier.storage != EvqBuffer &&
                qualifier.storage != EvqtaskPayloadSharedEXT)
                error(loc, errMsg, fnCandidate.getName().c_str(), "");
        } else {
            error(loc, errMsg, fnCandidate.getName().c_str(), "");
        }

        break;
    }

    case EOpInterpolateAtCentroid:
    case EOpInterpolateAtSample:
    case EOpInterpolateAtOffset:
    case EOpInterpolateAtVertex: {
        if (arg0->getType().getQualifier().storage != EvqVaryingIn) {
            // Traverse down the left branch of arg0 to ensure this argument is a valid interpolant.
            //
            // For desktop GL >4.3 we effectively only need to ensure that arg0 represents an l-value from an
            // input declaration.
            //
            // For desktop GL <= 4.3 and ES, we must also ensure that swizzling is not used
            //
            // For ES, we must also ensure that a field selection operator (i.e., '.') is not used on a named
            // struct.

            const bool esProfile = isEsProfile();
            const bool swizzleOkay = !esProfile && (version >= 440);

            std::string interpolantErrorMsg = "first argument must be an interpolant, or interpolant-array element";
            bool isValid = true; // Assume that the interpolant is valid until we find a condition making it invalid
            bool isIn = false; // Checks whether or not the interpolant is a shader input
            bool structAccessOp = false; // Whether or not the previous node in the chain is a struct accessor
            TIntermediate::traverseLValueBase(
                arg0, swizzleOkay, false,
                [&isValid, &isIn, &interpolantErrorMsg, esProfile, &structAccessOp](const TIntermNode& n) -> bool {
                    auto* type = n.getAsTyped();
                    if (type) {
                        if (type->getType().getQualifier().storage == EvqVaryingIn) {
                            isIn = true;
                        }
                        // If a field accessor was used, it can only be used to access a field of an input block, not a struct.
                        if (structAccessOp && (type->getType().getBasicType() != EbtBlock)) {
                            interpolantErrorMsg +=
                                ". Using the field of a named struct as an interpolant argument is not "
                                "allowed (ES-only).";
                            isValid = false;
                        }
                    }

                    // ES has different requirements for interpolants than GL
                    if (esProfile) {
                        // Swizzling will be taken care of by the 'swizzleOkay' argument passed to traverseLValueBase,
                        // so we only need to check whether or not a field accessor has been used with a named struct.
                        auto* binary = n.getAsBinaryNode();
                        if (binary && (binary->getOp() == EOpIndexDirectStruct)) {
                            structAccessOp = true;
                        }
                    }
                    // Don't continue traversing if we know we have an invalid interpolant at this point.
                    return isValid;
                });
            if (!isIn || !isValid) {
                error(loc, interpolantErrorMsg.c_str(), fnCandidate.getName().c_str(), "");
            }
        }

        if (callNode.getOp() == EOpInterpolateAtVertex) {
            if (!arg0->getType().getQualifier().isExplicitInterpolation())
                error(loc, "argument must be qualified as __explicitInterpAMD in", "interpolant", "");
            else {
                if (! (*argp)[1]->getAsConstantUnion())
                    error(loc, "argument must be compile-time constant", "vertex index", "");
                else {
                    unsigned vertexIdx = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
                    if (vertexIdx > 2)
                        error(loc, "must be in the range [0, 2]", "vertex index", "");
                }
            }
        }
    } break;

    case EOpEmitStreamVertex:
    case EOpEndStreamPrimitive:
        if (version == 150)
            requireExtensions(loc, 1, &E_GL_ARB_gpu_shader5, "if the version is 150, EmitStreamVertex and EndStreamPrimitive are only supported with extension GL_ARB_gpu_shader5");
        intermediate.setMultiStream();
        break;

    case EOpSubgroupClusteredAdd:
    case EOpSubgroupClusteredMul:
    case EOpSubgroupClusteredMin:
    case EOpSubgroupClusteredMax:
    case EOpSubgroupClusteredAnd:
    case EOpSubgroupClusteredOr:
    case EOpSubgroupClusteredXor:
        // The <clusterSize> as used in the subgroupClustered<op>() operations must be:
        // - An integral constant expression.
        // - At least 1.
        // - A power of 2.
        if ((*argp)[1]->getAsConstantUnion() == nullptr)
            error(loc, "argument must be compile-time constant", "cluster size", "");
        else {
            int size = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
            if (size < 1)
                error(loc, "argument must be at least 1", "cluster size", "");
            else if (!IsPow2(size))
                error(loc, "argument must be a power of 2", "cluster size", "");
        }
        break;

    case EOpSubgroupBroadcast:
    case EOpSubgroupQuadBroadcast:
        if (spvVersion.spv < EShTargetSpv_1_5) {
            // <id> must be an integral constant expression.
            if ((*argp)[1]->getAsConstantUnion() == nullptr)
                error(loc, "argument must be compile-time constant", "id", "");
        }
        break;

    case EOpBarrier:
    case EOpMemoryBarrier:
        if (argp->size() > 0) {
            requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
            memorySemanticsCheck(loc, fnCandidate, callNode);
        }
        break;

    case EOpMix:
        if (profile == EEsProfile && version < 310) {
            // Look for specific signatures
            if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat &&
                (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat &&
                (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) {
                requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, "specific signature of builtin mix");
            }
        }

        if (profile != EEsProfile && version < 450) {
            if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat &&
                (*argp)[0]->getAsTyped()->getBasicType() != EbtDouble &&
                (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat &&
                (*argp)[1]->getAsTyped()->getBasicType() != EbtDouble &&
                (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) {
                requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, fnCandidate.getName().c_str());
            }
        }

        break;

    default:
        break;
    }

    // Texture operations on texture objects (aside from texelFetch on a
    // textureBuffer) require EXT_samplerless_texture_functions.
    switch (callNode.getOp()) {
    case EOpTextureQuerySize:
    case EOpTextureQueryLevels:
    case EOpTextureQuerySamples:
    case EOpTextureFetch:
    case EOpTextureFetchOffset:
    {
        const TSampler& sampler = fnCandidate[0].type->getSampler();

        const bool isTexture = sampler.isTexture() && !sampler.isCombined();
        const bool isBuffer = sampler.isBuffer();
        const bool isFetch = callNode.getOp() == EOpTextureFetch || callNode.getOp() == EOpTextureFetchOffset;

        if (isTexture && (!isBuffer || !isFetch))
            requireExtensions(loc, 1, &E_GL_EXT_samplerless_texture_functions, fnCandidate.getName().c_str());

        break;
    }

    default:
        break;
    }

    if (callNode.isSubgroup()) {
        // these require SPIR-V 1.3
        if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_3)
            error(loc, "requires SPIR-V 1.3", "subgroup op", "");

        // Check that if extended types are being used that the correct extensions are enabled.
        if (arg0 != nullptr) {
            const TType& type = arg0->getType();
            bool enhanced = intermediate.getEnhancedMsgs();
            switch (type.getBasicType()) {
            default:
                break;
            case EbtInt8:
            case EbtUint8:
                requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int8, type.getCompleteString(enhanced).c_str());
                break;
            case EbtInt16:
            case EbtUint16:
                requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int16, type.getCompleteString(enhanced).c_str());
                break;
            case EbtInt64:
            case EbtUint64:
                requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int64, type.getCompleteString(enhanced).c_str());
                break;
            case EbtFloat16:
                requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_float16, type.getCompleteString(enhanced).c_str());
                break;
            }
        }
    }
}


// Deprecated! Use PureOperatorBuiltins == true instead, in which case this
// functionality is handled in builtInOpCheck() instead of here.
//
// Do additional checking of built-in function calls that were not mapped
// to built-in operations (e.g., texturing functions).
//
// Assumes there has been a semantically correct match to a built-in function.
//
void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode)
{
    // Further maintenance of this function is deprecated, because the "correct"
    // future-oriented design is to not have to do string compares on function names.

    // If PureOperatorBuiltins == true, then all built-ins should be mapped
    // to a TOperator, and this function would then never get called.

    assert(PureOperatorBuiltins == false);

    // built-in texturing functions get their return value precision from the precision of the sampler
    if (fnCandidate.getType().getQualifier().precision == EpqNone &&
        fnCandidate.getParamCount() > 0 && fnCandidate[0].type->getBasicType() == EbtSampler)
        callNode.getQualifier().precision = callNode.getSequence()[0]->getAsTyped()->getQualifier().precision;

    if (fnCandidate.getName().compare(0, 7, "texture") == 0) {
        if (fnCandidate.getName().compare(0, 13, "textureGather") == 0) {
            TString featureString = fnCandidate.getName() + "(...)";
            const char* feature = featureString.c_str();
            profileRequires(loc, EEsProfile, 310, nullptr, feature);

            int compArg = -1;  // track which argument, if any, is the constant component argument
            if (fnCandidate.getName().compare("textureGatherOffset") == 0) {
                // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
                if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3)
                    profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
                else
                    profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
                int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2;
                if (! callNode.getSequence()[offsetArg]->getAsConstantUnion())
                    profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
                                    "non-constant offset argument");
                if (! fnCandidate[0].type->getSampler().shadow)
                    compArg = 3;
            } else if (fnCandidate.getName().compare("textureGatherOffsets") == 0) {
                profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
                if (! fnCandidate[0].type->getSampler().shadow)
                    compArg = 3;
                // check for constant offsets
                int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2;
                if (! callNode.getSequence()[offsetArg]->getAsConstantUnion())
                    error(loc, "must be a compile-time constant:", feature, "offsets argument");
            } else if (fnCandidate.getName().compare("textureGather") == 0) {
                // More than two arguments needs gpu_shader5, and rectangular or shadow needs gpu_shader5,
                // otherwise, need GL_ARB_texture_gather.
                if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) {
                    profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
                    if (! fnCandidate[0].type->getSampler().shadow)
                        compArg = 2;
                } else
                    profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
            }

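            // When a component argument was identified above, it must be a compile-time constant in [0, 3].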
            if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
                if (callNode.getSequence()[compArg]->getAsConstantUnion()) {
                    int value = callNode.getSequence()[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
                    if (value < 0 || value > 3)
                        error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
                } else
                    error(loc, "must be a compile-time constant:", feature, "component argument");
            }
        } else {
            // this is only for functions not starting "textureGather"...
            if (fnCandidate.getName().find("Offset") != TString::npos) {

                // Handle texture-offset limits checking
                int arg = -1;
                if (fnCandidate.getName().compare("textureOffset") == 0)
                    arg = 2;
                else if (fnCandidate.getName().compare("texelFetchOffset") == 0)
                    arg = 3;
                else if (fnCandidate.getName().compare("textureProjOffset") == 0)
                    arg = 2;
                else if (fnCandidate.getName().compare("textureLodOffset") == 0)
                    arg = 3;
                else if (fnCandidate.getName().compare("textureProjLodOffset") == 0)
                    arg = 3;
                else if (fnCandidate.getName().compare("textureGradOffset") == 0)
                    arg = 4;
                else if (fnCandidate.getName().compare("textureProjGradOffset") == 0)
                    arg = 4;

                if (arg > 0) {
                    if (! callNode.getSequence()[arg]->getAsConstantUnion())
                        error(loc, "argument must be compile-time constant", "texel offset", "");
                    else {
                        const TType& type = callNode.getSequence()[arg]->getAsTyped()->getType();
                        for (int c = 0; c < type.getVectorSize(); ++c) {
                            int offset = callNode.getSequence()[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
                            if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
                                error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
                        }
                    }
                }
            }
        }
    }

    // GL_ARB_shader_texture_image_samples
    if (fnCandidate.getName().compare(0, 14, "textureSamples") == 0 || fnCandidate.getName().compare(0, 12, "imageSamples") == 0)
        profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples");

    if (fnCandidate.getName().compare(0, 11, "imageAtomic") == 0) {
        const TType& imageType = callNode.getSequence()[0]->getAsTyped()->getType();
        if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) {
            if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui)
                error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
        } else {
            if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
                error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
            else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile())
                error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
        }
    }
}

//
// Do any extra checking for a user function call.
//
void TParseContext::userFunctionCallCheck(const TSourceLoc& loc, TIntermAggregate& callNode)
{
    TIntermSequence& arguments = callNode.getSequence();

    for (int i = 0; i < (int)arguments.size(); ++i)
        samplerConstructorLocationCheck(loc, "call argument", arguments[i]);
}

//
// Emit an error if this is a sampler constructor
//
void TParseContext::samplerConstructorLocationCheck(const TSourceLoc& loc, const char* token, TIntermNode* node)
{
    if (node->getAsOperator() && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
        error(loc, "sampler constructor must appear at point of use", token, "");
}

//
// Handle seeing a built-in constructor in a grammar production.
//
TFunction* TParseContext::handleConstructorCall(const TSourceLoc& loc, const TPublicType& publicType)
{
    TType type(publicType);
    type.getQualifier().precision = EpqNone;

    if (type.isArray()) {
        profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed constructor");
        profileRequires(loc, EEsProfile, 300, nullptr, "arrayed constructor");
    }

    // Reuse EOpConstructTextureSampler for bindless image constructor
    // uvec2 imgHandle;
    // imageLoad(image1D(imgHandle), 0);
    if (type.isImage() && extensionTurnedOn(E_GL_ARB_bindless_texture))
    {
        intermediate.setBindlessImageMode(currentCaller, AstRefTypeFunc);
    }

    TOperator op = intermediate.mapTypeToConstructorOp(type);

    if (op == EOpNull) {
        if (intermediate.getEnhancedMsgs() && type.getBasicType() == EbtSampler)
            error(loc, "function not supported in this version; use texture() instead", "texture*D*", "");
        else
            error(loc, "cannot construct this type", type.getBasicString(), "");
        op = EOpConstructFloat;
        TType errorType(EbtFloat);
        type.shallowCopy(errorType);
    }

    TString empty("");

    return new TFunction(&empty, type, op);
}

// Handle seeing a precision qualifier in the grammar.
void TParseContext::handlePrecisionQualifier(const TSourceLoc& /*loc*/, TQualifier& qualifier, TPrecisionQualifier precision)
{
    if (obeyPrecisionQualifiers())
        qualifier.precision = precision;
}

// Check for messages to give on seeing a precision qualifier used in a
// declaration in the grammar.
void TParseContext::checkPrecisionQualifier(const TSourceLoc& loc, TPrecisionQualifier)
{
    if (precisionManager.shouldWarnAboutDefaults()) {
        warn(loc, "all default precisions are highp; use precision statements to quiet warning, e.g.:\n"
                  "         \"precision mediump int; precision highp float;\"", "", "");
        precisionManager.defaultWarningGiven();
    }
}

//
// Same error message for all places assignments don't work.
//
void TParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right)
{
    error(loc, "", op, "cannot convert from '%s' to '%s'",
          right.c_str(), left.c_str());
}

//
// Same error message for all places unary operations don't work.
//
void TParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand)
{
    error(loc, " wrong operand type", op,
          "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)",
          op, operand.c_str());
}

//
// Same error message for all places binary operations don't work.
//
void TParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right)
{
    error(loc, " wrong operand types:", op,
          "no operation '%s' exists that takes a left-hand operand of type '%s' and "
          "a right operand of type '%s' (or there is no acceptable conversion)",
          op, left.c_str(), right.c_str());
}

//
// A basic type of EbtVoid is a key that the name string was seen in the source, but
// it was not found as a variable in the symbol table. If so, give the error
// message and insert a dummy variable in the symbol table to prevent future errors.
//
void TParseContext::variableCheck(TIntermTyped*& nodePtr)
{
    TIntermSymbol* symbol = nodePtr->getAsSymbolNode();
    if (! symbol)
        return;

    if (symbol->getType().getBasicType() == EbtVoid) {
        const char *extraInfoFormat = "";
        if (spvVersion.vulkan != 0 && symbol->getName() == "gl_VertexID") {
            extraInfoFormat = "(Did you mean gl_VertexIndex?)";
        } else if (spvVersion.vulkan != 0 && symbol->getName() == "gl_InstanceID") {
            extraInfoFormat = "(Did you mean gl_InstanceIndex?)";
        }
        error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), extraInfoFormat);

        // Add to symbol table to prevent future error messages on the same name
        if (symbol->getName().size() > 0) {
            TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat));
            symbolTable.insert(*fakeVariable);

            // substitute a symbol node for this new variable
            nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc());
        }
    } else {
        switch (symbol->getQualifier().storage) {
        case EvqPointCoord:
            profileRequires(symbol->getLoc(), ENoProfile, 120, nullptr, "gl_PointCoord");
            break;
        default: break; // some compilers want this
        }
    }
}

//
// Both test and if necessary, spit out an error, to see if the node is really
// an l-value that can be operated on this way.
//
// Returns true if there was an error.
//
bool TParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
    TIntermBinary* binaryNode = node->getAsBinaryNode();

    if (binaryNode) {
        bool errorReturn = false;

        switch(binaryNode->getOp()) {
        case EOpIndexDirect:
        case EOpIndexIndirect:
            // ... tessellation control shader ...
            // If a per-vertex output variable is used as an l-value, it is a
            // compile-time or link-time error if the expression indicating the
            // vertex index is not the identifier gl_InvocationID.
            if (language == EShLangTessControl) {
                const TType& leftType = binaryNode->getLeft()->getType();
                if (leftType.getQualifier().storage == EvqVaryingOut && ! leftType.getQualifier().patch && binaryNode->getLeft()->getAsSymbolNode()) {
                    // we have a per-vertex output
                    const TIntermSymbol* rightSymbol = binaryNode->getRight()->getAsSymbolNode();
                    if (! rightSymbol || rightSymbol->getQualifier().builtIn != EbvInvocationId)
                        error(loc, "tessellation-control per-vertex output l-value must be indexed with gl_InvocationID", "[]", "");
                }
            }
            break; // left node is checked by base class
        case EOpVectorSwizzle:
            errorReturn = lValueErrorCheck(loc, op, binaryNode->getLeft());
            if (!errorReturn) {
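                // Count how many times each component appears in the swizzle; an l-value
                // swizzle must not select the same component more than once.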
                int offset[4] = {0,0,0,0};

                TIntermTyped* rightNode = binaryNode->getRight();
                TIntermAggregate *aggrNode = rightNode->getAsAggregate();

                for (TIntermSequence::iterator p = aggrNode->getSequence().begin();
                     p != aggrNode->getSequence().end(); p++) {
                    int value = (*p)->getAsTyped()->getAsConstantUnion()->getConstArray()[0].getIConst();
                    offset[value]++;
                    if (offset[value] > 1) {
                        error(loc, " l-value of swizzle cannot have duplicate components", op, "", "");

                        return true;
                    }
                }
            }

            return errorReturn;
        default:
            break;
        }

        if (errorReturn) {
            error(loc, " l-value required", op, "", "");
            return true;
        }
    }

    if (binaryNode && binaryNode->getOp() == EOpIndexDirectStruct && binaryNode->getLeft()->isReference())
        return false;

    // Let the base class check errors
    if (TParseContextBase::lValueErrorCheck(loc, op, node))
        return true;

    const char* symbol = nullptr;
    TIntermSymbol* symNode = node->getAsSymbolNode();
    if (symNode != nullptr)
        symbol = symNode->getName().c_str();

    const char* message = nullptr;
    switch (node->getQualifier().storage) {
    case EvqVaryingIn:      message = "can't modify shader input";   break;
    case EvqInstanceId:     message = "can't modify gl_InstanceID";  break;
    case EvqVertexId:       message = "can't modify gl_VertexID";    break;
    case EvqFace:           message = "can't modify gl_FrontFace";   break;
    case EvqFragCoord:      message = "can't modify gl_FragCoord";   break;
    case EvqPointCoord:     message = "can't modify gl_PointCoord";  break;
    case EvqFragDepth:
        intermediate.setDepthReplacing();
        // "In addition, it is an error to statically write to gl_FragDepth in the fragment shader."
        if (isEsProfile() && intermediate.getEarlyFragmentTests())
            message = "can't modify gl_FragDepth if using early_fragment_tests";
        break;
    case EvqFragStencil:
        intermediate.setStencilReplacing();
        // "In addition, it is an error to statically write to gl_FragDepth in the fragment shader."
        if (isEsProfile() && intermediate.getEarlyFragmentTests())
            message = "can't modify EvqFragStencil if using early_fragment_tests";
        break;

    case EvqtaskPayloadSharedEXT:
        if (language == EShLangMesh)
            message = "can't modify variable with storage qualifier taskPayloadSharedEXT in mesh shaders";
        break;
    default:
        break;
    }

    if (message == nullptr && binaryNode == nullptr && symNode == nullptr) {
        error(loc, " l-value required", op, "", "");

        return true;
    }

    //
    // Everything else is okay, no error.
    //
    if (message == nullptr)
        return false;

    //
    // If we get here, we have an error and a message.
    //
    if (symNode)
        error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
    else
        error(loc, " l-value required", op, "(%s)", message);

    return true;
}

// Test for and give an error if the node can't be read from.
void TParseContext::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
    // Let the base class check errors
    TParseContextBase::rValueErrorCheck(loc, op, node);

    TIntermSymbol* symNode = node->getAsSymbolNode();
    if (!(symNode && symNode->getQualifier().isWriteOnly())) // base class checks
        if (symNode && symNode->getQualifier().isExplicitInterpolation())
            error(loc, "can't read from explicitly-interpolated object: ", op, symNode->getName().c_str());

    // local_size_{xyz} must be assigned or specialized before gl_WorkGroupSize can be assigned.
    if(node->getQualifier().builtIn == EbvWorkGroupSize &&
       !(intermediate.isLocalSizeSet() || intermediate.isLocalSizeSpecialized()))
        error(loc, "can't read from gl_WorkGroupSize before a fixed workgroup size has been declared", op, "");
}

//
// Both test, and if necessary spit out an error, to see if the node is really
// a constant.
//
void TParseContext::constantValueCheck(TIntermTyped* node, const char* token)
{
    if (! node->getQualifier().isConstant())
        error(node->getLoc(), "constant expression required", token, "");
}

//
// Both test, and if necessary spit out an error, to see if the node is really
// a 32-bit integer or can implicitly convert to one.
//
void TParseContext::integerCheck(const TIntermTyped* node, const char* token)
{
    auto from_type = node->getBasicType();
    if ((from_type == EbtInt || from_type == EbtUint ||
         intermediate.canImplicitlyPromote(from_type, EbtInt, EOpNull) ||
         intermediate.canImplicitlyPromote(from_type, EbtUint, EOpNull)) && node->isScalar())
        return;

    error(node->getLoc(), "scalar integer expression required", token, "");
}

//
// Both test, and if necessary spit out an error, to see if we are currently
// globally scoped.
//
void TParseContext::globalCheck(const TSourceLoc& loc, const char* token)
{
    if (! symbolTable.atGlobalLevel())
        error(loc, "not allowed in nested scope", token, "");
}

//
// Reserved errors for GLSL.
//
void TParseContext::reservedErrorCheck(const TSourceLoc& loc, const TString& identifier)
{
    // "Identifiers starting with "gl_" are reserved for use by OpenGL, and may not be
    // declared in a shader; this results in a compile-time error."
    if (! symbolTable.atBuiltInLevel()) {
        if (builtInName(identifier) && !extensionTurnedOn(E_GL_EXT_spirv_intrinsics))
            // The extension GL_EXT_spirv_intrinsics allows us to declare identifiers starting with "gl_".
            error(loc, "identifiers starting with \"gl_\" are reserved", identifier.c_str(), "");

        // "__" are not supposed to be an error. ES 300 (and desktop) added the clarification:
        // "In addition, all identifiers containing two consecutive underscores (__) are
        // reserved; using such a name does not itself result in an error, but may result
        // in undefined behavior."
        // however, before that, ES tests required an error.
        if (identifier.find("__") != TString::npos && !extensionTurnedOn(E_GL_EXT_spirv_intrinsics)) {
            // The extension GL_EXT_spirv_intrinsics allows us to declare identifiers starting with "__".
            if (isEsProfile() && version < 300)
                error(loc, "identifiers containing consecutive underscores (\"__\") are reserved, and an error if version < 300", identifier.c_str(), "");
            else
                warn(loc, "identifiers containing consecutive underscores (\"__\") are reserved", identifier.c_str(), "");
        }
    }
}

//
// Reserved errors for the preprocessor.
//
void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* identifier, const char* op)
{
    // "__" are not supposed to be an error. ES 300 (and desktop) added the clarification:
    // "All macro names containing two consecutive underscores ( __ ) are reserved;
    // defining such a name does not itself result in an error, but may result in
    // undefined behavior. All macro names prefixed with "GL_" ("GL" followed by a
    // single underscore) are also reserved, and defining such a name results in a
    // compile-time error."
    // however, before that, ES tests required an error.
    if (strncmp(identifier, "GL_", 3) == 0 && !extensionTurnedOn(E_GL_EXT_spirv_intrinsics))
        // The extension GL_EXT_spirv_intrinsics allows us to declare macros prefixed with "GL_".
        ppError(loc, "names beginning with \"GL_\" can't be (un)defined:", op, identifier);
    else if (strncmp(identifier, "defined", 8) == 0)
        if (relaxedErrors())
            ppWarn(loc, "\"defined\" is (un)defined:", op, identifier);
        else
            ppError(loc, "\"defined\" can't be (un)defined:", op, identifier);
    else if (strstr(identifier, "__") != nullptr && !extensionTurnedOn(E_GL_EXT_spirv_intrinsics)) {
        // The extension GL_EXT_spirv_intrinsics allows us to declare macros prefixed with "__".
        if (isEsProfile() && version >= 300 &&
            (strcmp(identifier, "__LINE__") == 0 ||
             strcmp(identifier, "__FILE__") == 0 ||
             strcmp(identifier, "__VERSION__") == 0))
            ppError(loc, "predefined names can't be (un)defined:", op, identifier);
        else {
            if (isEsProfile() && version < 300 && !relaxedErrors())
                ppError(loc, "names containing consecutive underscores are reserved, and an error if version < 300:", op, identifier);
            else
                ppWarn(loc, "names containing consecutive underscores are reserved:", op, identifier);
        }
    }
}

//
// See if this version/profile allows use of the line-continuation character '\'.
//
// Returns true if a line continuation should be done.
//
bool TParseContext::lineContinuationCheck(const TSourceLoc& loc, bool endOfComment)
{
    const char* message = "line continuation";

    bool lineContinuationAllowed = (isEsProfile() && version >= 300) ||
                                   (!isEsProfile() && (version >= 420 || extensionTurnedOn(E_GL_ARB_shading_language_420pack)));

    if (endOfComment) {
        if (lineContinuationAllowed)
            warn(loc, "used at end of comment; the following line is still part of the comment", message, "");
        else
            warn(loc, "used at end of comment, but this version does not provide line continuation", message, "");

        return lineContinuationAllowed;
    }

    if (relaxedErrors()) {
        if (! lineContinuationAllowed)
            warn(loc, "not allowed in this version", message, "");
        return true;
    } else {
        profileRequires(loc, EEsProfile, 300, nullptr, message);
        profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, message);
    }

    return lineContinuationAllowed;
}

bool TParseContext::builtInName(const TString& identifier)
{
    return identifier.compare(0, 3, "gl_") == 0;
}

//
// Make sure there is enough data and not too many arguments provided to the
// constructor to build something of the type of the constructor. Also returns
// the type of the constructor.
//
// Part of establishing type is establishing specialization-constness.
// We don't yet know "top down" whether type is a specialization constant,
// but a const constructor can become a specialization constant if any of
// its children are, subject to KHR_vulkan_glsl rules:
//
//     - int(), uint(), and bool() constructors for type conversions
//       from any of the following types to any of the following types:
//         * int
//         * uint
//         * bool
//     - vector versions of the above conversion constructors
//
// Returns true if there was an error in construction.
//
bool TParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, TFunction& function, TOperator op, TType& type)
{
    // See if the constructor does not establish the main type, only requalifies
    // it, in which case the type comes from the argument instead of from the
    // constructor function.
    switch (op) {
    case EOpConstructNonuniform:
        if (node != nullptr && node->getAsTyped() != nullptr) {
            type.shallowCopy(node->getAsTyped()->getType());
            type.getQualifier().makeTemporary();
            type.getQualifier().nonUniform = true;
        }
        break;
    default:
        type.shallowCopy(function.getType());
        break;
    }

    TString constructorString;
    if (intermediate.getEnhancedMsgs())
        constructorString.append(type.getCompleteString(true, false, false, true)).append(" constructor");
    else
        constructorString.append("constructor");

    // See if it's a matrix
    bool constructingMatrix = false;
    switch (op) {
    case EOpConstructTextureSampler:
        return constructorTextureSamplerError(loc, function);
    case EOpConstructMat2x2:
    case EOpConstructMat2x3:
    case EOpConstructMat2x4:
    case EOpConstructMat3x2:
    case EOpConstructMat3x3:
    case EOpConstructMat3x4:
    case EOpConstructMat4x2:
    case EOpConstructMat4x3:
    case EOpConstructMat4x4:
    case EOpConstructDMat2x2:
    case EOpConstructDMat2x3:
    case EOpConstructDMat2x4:
    case EOpConstructDMat3x2:
    case EOpConstructDMat3x3:
    case EOpConstructDMat3x4:
    case EOpConstructDMat4x2:
    case EOpConstructDMat4x3:
    case EOpConstructDMat4x4:
    case EOpConstructF16Mat2x2:
    case EOpConstructF16Mat2x3:
    case EOpConstructF16Mat2x4:
    case EOpConstructF16Mat3x2:
    case EOpConstructF16Mat3x3:
    case EOpConstructF16Mat3x4:
    case EOpConstructF16Mat4x2:
    case EOpConstructF16Mat4x3:
    case EOpConstructF16Mat4x4:
        constructingMatrix = true;
        break;
    default:
        break;
    }

    //
    // Walk the arguments for first-pass checks and collection of information.
    //

    int size = 0;
    bool constType = true;
    bool specConstType = false;   // value is only valid if constType is true
    bool full = false;
    bool overFull = false;
    bool matrixInMatrix = false;
    bool arrayArg = false;
    bool floatArgument = false;
    bool intArgument = false;
    for (int arg = 0; arg < function.getParamCount(); ++arg) {
        if (function[arg].type->isArray()) {
            if (function[arg].type->isUnsizedArray()) {
                // Can't construct from an unsized array.
                error(loc, "array argument must be sized", constructorString.c_str(), "");
                return true;
            }
            arrayArg = true;
        }
        if (constructingMatrix && function[arg].type->isMatrix())
            matrixInMatrix = true;

        // 'full' will go to true when enough args have been seen.  If we loop
        // again, there is an extra argument.
        if (full) {
            // For vectors and matrices, it's okay to have too many components
            // available, but not okay to have unused arguments.
            overFull = true;
        }

        size += function[arg].type->computeNumComponents();
        if (op != EOpConstructStruct && ! type.isArray() && size >= type.computeNumComponents())
            full = true;

        if (! function[arg].type->getQualifier().isConstant())
            constType = false;
        if (function[arg].type->getQualifier().isSpecConstant())
            specConstType = true;
        if (function[arg].type->isFloatingDomain())
            floatArgument = true;
        if (function[arg].type->isIntegerDomain())
            intArgument = true;
        if (type.isStruct()) {
            if (function[arg].type->contains16BitFloat()) {
                requireFloat16Arithmetic(loc, constructorString.c_str(), "can't construct structure containing 16-bit type");
            }
            if (function[arg].type->contains16BitInt()) {
                requireInt16Arithmetic(loc, constructorString.c_str(), "can't construct structure containing 16-bit type");
            }
            if (function[arg].type->contains8BitInt()) {
                requireInt8Arithmetic(loc, constructorString.c_str(), "can't construct structure containing 8-bit type");
            }
        }
    }
    if (op == EOpConstructNonuniform)
        constType = false;

    switch (op) {
    case EOpConstructFloat16:
    case EOpConstructF16Vec2:
    case EOpConstructF16Vec3:
    case EOpConstructF16Vec4:
        if (type.isArray())
            requireFloat16Arithmetic(loc, constructorString.c_str(), "16-bit arrays not supported");
        if (type.isVector() && function.getParamCount() != 1)
            requireFloat16Arithmetic(loc, constructorString.c_str(), "16-bit vectors only take vector types");
        break;
    case EOpConstructUint16:
    case EOpConstructU16Vec2:
    case EOpConstructU16Vec3:
    case EOpConstructU16Vec4:
    case EOpConstructInt16:
    case EOpConstructI16Vec2:
    case EOpConstructI16Vec3:
    case EOpConstructI16Vec4:
        if (type.isArray())
            requireInt16Arithmetic(loc, constructorString.c_str(), "16-bit arrays not supported");
        if (type.isVector() && function.getParamCount() != 1)
            requireInt16Arithmetic(loc, constructorString.c_str(), "16-bit vectors only take vector types");
        break;
    case EOpConstructUint8:
    case EOpConstructU8Vec2:
    case EOpConstructU8Vec3:
    case EOpConstructU8Vec4:
    case EOpConstructInt8:
    case EOpConstructI8Vec2:
    case EOpConstructI8Vec3:
    case EOpConstructI8Vec4:
        if (type.isArray())
            requireInt8Arithmetic(loc, constructorString.c_str(), "8-bit arrays not supported");
        if (type.isVector() && function.getParamCount() != 1)
            requireInt8Arithmetic(loc, constructorString.c_str(), "8-bit vectors only take vector types");
        break;
    default:
        break;
    }

    // inherit constness from children
    if (constType) {
        bool makeSpecConst;
        // Finish pinning down spec-const semantics
        if (specConstType) {
            switch (op) {
            case EOpConstructInt8:
            case EOpConstructInt:
            case EOpConstructUint:
            case EOpConstructBool:
            case EOpConstructBVec2:
            case EOpConstructBVec3:
            case EOpConstructBVec4:
            case EOpConstructIVec2:
            case EOpConstructIVec3:
            case EOpConstructIVec4:
            case EOpConstructUVec2:
            case EOpConstructUVec3:
            case EOpConstructUVec4:
            case EOpConstructUint8:
            case EOpConstructInt16:
            case EOpConstructUint16:
            case EOpConstructInt64:
            case EOpConstructUint64:
            case EOpConstructI8Vec2:
            case EOpConstructI8Vec3:
            case EOpConstructI8Vec4:
            case EOpConstructU8Vec2:
            case EOpConstructU8Vec3:
            case EOpConstructU8Vec4:
            case EOpConstructI16Vec2:
            case EOpConstructI16Vec3:
            case EOpConstructI16Vec4:
            case EOpConstructU16Vec2:
            case EOpConstructU16Vec3:
            case EOpConstructU16Vec4:
            case EOpConstructI64Vec2:
            case EOpConstructI64Vec3:
            case EOpConstructI64Vec4:
            case EOpConstructU64Vec2:
            case EOpConstructU64Vec3:
            case EOpConstructU64Vec4:
                // This was the list of valid ones, if they aren't converting from float
                // and aren't making an array.
                makeSpecConst = ! floatArgument && ! type.isArray();
                break;

            case EOpConstructVec2:
            case EOpConstructVec3:
            case EOpConstructVec4:
                // This was the list of valid ones, if they aren't converting from int
                // and aren't making an array.
                makeSpecConst = ! intArgument && !type.isArray();
                break;

            default:
                // anything else wasn't white-listed in the spec as a conversion
                makeSpecConst = false;
                break;
            }
        } else
            makeSpecConst = false;

        if (makeSpecConst)
            type.getQualifier().makeSpecConstant();
        else if (specConstType)
            type.getQualifier().makeTemporary();
        else
            type.getQualifier().storage = EvqConst;
    }

    if (type.isArray()) {
        if (function.getParamCount() == 0) {
            error(loc, "array constructor must have at least one argument", constructorString.c_str(), "");
            return true;
        }

        if (type.isUnsizedArray()) {
            // auto adapt the constructor type to the number of arguments
            type.changeOuterArraySize(function.getParamCount());
        } else if (type.getOuterArraySize() != function.getParamCount()) {
            error(loc, "array constructor needs one argument per array element", constructorString.c_str(), "");
            return true;
        }

        if (type.isArrayOfArrays()) {
            // Types have to match, but we're still making the type.
            // Finish making the type, and the comparison is done later
            // when checking for conversion.
            TArraySizes& arraySizes = *type.getArraySizes();

            // At least the dimensionalities have to match.
            if (! function[0].type->isArray() ||
                arraySizes.getNumDims() != function[0].type->getArraySizes()->getNumDims() + 1) {
                error(loc, "array constructor argument not correct type to construct array element", constructorString.c_str(), "");
                return true;
            }

            if (arraySizes.isInnerUnsized()) {
                // "Arrays of arrays ..., and the size for any dimension is optional"
                // That means we need to adopt (from the first argument) the other array sizes into the type.
                for (int d = 1; d < arraySizes.getNumDims(); ++d) {
                    if (arraySizes.getDimSize(d) == UnsizedArraySize) {
                        arraySizes.setDimSize(d, function[0].type->getArraySizes()->getDimSize(d - 1));
                    }
                }
            }
        }
    }

    if (arrayArg && op != EOpConstructStruct && ! type.isArrayOfArrays()) {
        error(loc, "constructing non-array constituent from array argument", constructorString.c_str(), "");
        return true;
    }

    if (matrixInMatrix && ! type.isArray()) {
        profileRequires(loc, ENoProfile, 120, nullptr, "constructing matrix from matrix");

        // "If a matrix argument is given to a matrix constructor,
        // it is a compile-time error to have any other arguments."
        if (function.getParamCount() != 1)
            error(loc, "matrix constructed from matrix can only have one argument", constructorString.c_str(), "");
        return false;
    }

    if (overFull) {
        error(loc, "too many arguments", constructorString.c_str(), "");
        return true;
    }

    if (op == EOpConstructStruct && ! type.isArray() && (int)type.getStruct()->size() != function.getParamCount()) {
        error(loc, "Number of constructor parameters does not match the number of structure fields", constructorString.c_str(), "");
        return true;
    }

    if ((op != EOpConstructStruct && size != 1 && size < type.computeNumComponents()) ||
        (op == EOpConstructStruct && size < type.computeNumComponents())) {
        error(loc, "not enough data provided for construction", constructorString.c_str(), "");
        return true;
    }

    if (type.isCoopMat() && function.getParamCount() != 1) {
        error(loc, "wrong number of arguments", constructorString.c_str(), "");
        return true;
    }
    if (type.isCoopMat() &&
        !(function[0].type->isScalar() || function[0].type->isCoopMat())) {
        error(loc, "Cooperative matrix constructor argument must be scalar or cooperative matrix", constructorString.c_str(), "");
        return true;
    }

    TIntermTyped* typed = node->getAsTyped();
    if (type.isCoopMat() && typed->getType().isCoopMat() &&
        !type.sameCoopMatShapeAndUse(typed->getType())) {
        error(loc, "Cooperative matrix type parameters mismatch", constructorString.c_str(), "");
        return true;
    }

    if (typed == nullptr) {
        error(loc, "constructor argument does not have a type", constructorString.c_str(), "");
        return true;
    }
    if (op != EOpConstructStruct && op != EOpConstructNonuniform && typed->getBasicType() == EbtSampler) {
        if (op == EOpConstructUVec2 && extensionTurnedOn(E_GL_ARB_bindless_texture)) {
            intermediate.setBindlessTextureMode(currentCaller, AstRefTypeFunc);
        }
        else {
            error(loc, "cannot convert a sampler", constructorString.c_str(), "");
            return true;
        }
    }
    if (op != EOpConstructStruct && typed->isAtomic()) {
        error(loc, "cannot convert an atomic_uint", constructorString.c_str(), "");
        return true;
    }
    if (typed->getBasicType() == EbtVoid) {
        error(loc, "cannot convert a void", constructorString.c_str(), "");
        return true;
    }

    return false;
}

// Verify all the correct semantics for constructing a combined texture/sampler.
// Return true if the semantics are incorrect.
bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const TFunction& function)
{
    TString constructorName = function.getType().getBasicTypeString();  // TODO: performance: should not be making copy; interface needs to change
    const char* token = constructorName.c_str();
    // verify the constructor for bindless texture, the input must be ivec2 or uvec2
    if (function.getParamCount() == 1) {
        TType* pType = function[0].type;
        TBasicType basicType = pType->getBasicType();
        bool isIntegerVec2 = ((basicType == EbtUint || basicType == EbtInt) && pType->getVectorSize() == 2);
        bool bindlessMode = extensionTurnedOn(E_GL_ARB_bindless_texture);
        if (isIntegerVec2 && bindlessMode) {
            if (pType->getSampler().isImage())
                intermediate.setBindlessImageMode(currentCaller, AstRefTypeFunc);
            else
                intermediate.setBindlessTextureMode(currentCaller, AstRefTypeFunc);
            return false;
        } else {
            if (!bindlessMode)
                error(loc, "sampler-constructor requires the extension GL_ARB_bindless_texture enabled", token, "");
            else
                error(loc, "sampler-constructor requires the input to be ivec2 or uvec2", token, "");
            return true;
        }
    }

    // exactly two arguments needed
    if (function.getParamCount() != 2) {
        error(loc, "sampler-constructor requires two arguments", token, "");
        return true;
    }

    // For now, not allowing arrayed constructors, the rest of this function
    // is set up to allow them, if this test is removed:
    if (function.getType().isArray()) {
        error(loc, "sampler-constructor cannot make an array of samplers", token, "");
        return true;
    }

    // first argument
    //  * the constructor's first argument must be a texture type
    //  * the dimensionality (1D, 2D, 3D, Cube, Rect, Buffer, MS, and Array)
    //    of the texture type must match that of the constructed sampler type
    //    (that is, the suffixes of the type of the first argument and the
    //    type of the constructor will be spelled the same way)
    if (function[0].type->getBasicType() != EbtSampler ||
        ! function[0].type->getSampler().isTexture() ||
        function[0].type->isArray()) {
        error(loc, "sampler-constructor first argument must be a scalar *texture* type", token, "");
        return true;
    }
    // simulate the first argument's impact on the result type, so it can be compared with the encapsulated operator!=()
    TSampler texture = function.getType().getSampler();
    texture.setCombined(false);
    texture.shadow = false;
    if (texture != function[0].type->getSampler()) {
        error(loc, "sampler-constructor first argument must be a *texture* type"
                   " matching the dimensionality and sampled type of the constructor", token, "");
        return true;
    }

    // second argument
    //   * the constructor's second argument must be a scalar of type
    //     *sampler* or *samplerShadow*
    if (  function[1].type->getBasicType() != EbtSampler ||
        ! function[1].type->getSampler().isPureSampler() ||
          function[1].type->isArray()) {
        error(loc, "sampler-constructor second argument must be a scalar sampler or samplerShadow", token, "");
        return true;
    }

    return false;
}

// Checks to see if a void variable has been declared and raise an error message for such a case
3820
//
3821
// returns true in case of an error
3822
//
3823
bool TParseContext::voidErrorCheck(const TSourceLoc& loc, const TString& identifier, const TBasicType basicType)
3824
{
3825
if (basicType == EbtVoid) {
3826
error(loc, "illegal use of type 'void'", identifier.c_str(), "");
3827
return true;
3828
}
3829
3830
return false;
3831
}
3832
3833
// Checks to see if the node (for the expression) contains a scalar boolean expression or not
3834
void TParseContext::boolCheck(const TSourceLoc& loc, const TIntermTyped* type)
3835
{
3836
if (type->getBasicType() != EbtBool || type->isArray() || type->isMatrix() || type->isVector())
3837
error(loc, "boolean expression expected", "", "");
3838
}
3839
3840
// This function checks to see if the node (for the expression) contains a scalar boolean expression or not
3841
void TParseContext::boolCheck(const TSourceLoc& loc, const TPublicType& pType)
3842
{
3843
if (pType.basicType != EbtBool || pType.arraySizes || pType.matrixCols > 1 || (pType.vectorSize > 1))
3844
error(loc, "boolean expression expected", "", "");
3845
}
3846
3847
void TParseContext::samplerCheck(const TSourceLoc& loc, const TType& type, const TString& identifier, TIntermTyped* /*initializer*/)
{
    // Check that the appropriate extension is enabled if external sampler is used.
    // There are two extensions. The correct one must be used based on GLSL version.
    if (type.getBasicType() == EbtSampler && type.getSampler().isExternal()) {
        if (version < 300) {
            requireExtensions(loc, 1, &E_GL_OES_EGL_image_external, "samplerExternalOES");
        } else {
            requireExtensions(loc, 1, &E_GL_OES_EGL_image_external_essl3, "samplerExternalOES");
        }
    }
    if (type.getSampler().isYuv()) {
        requireExtensions(loc, 1, &E_GL_EXT_YUV_target, "__samplerExternal2DY2YEXT");
    }

    if (type.getQualifier().storage == EvqUniform)
        return;

    if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtSampler)) {
        // For bindless texture, sampler can be declared as a struct member
        if (extensionTurnedOn(E_GL_ARB_bindless_texture)) {
            if (type.getSampler().isImage())
                intermediate.setBindlessImageMode(currentCaller, AstRefTypeVar);
            else
                intermediate.setBindlessTextureMode(currentCaller, AstRefTypeVar);
        }
        else {
            error(loc, "non-uniform struct contains a sampler or image:", type.getBasicTypeString().c_str(), identifier.c_str());
        }
    }
    else if (type.getBasicType() == EbtSampler && type.getQualifier().storage != EvqUniform) {
        // For bindless texture, sampler can be declared as an input/output/block member
        if (extensionTurnedOn(E_GL_ARB_bindless_texture)) {
            if (type.getSampler().isImage())
                intermediate.setBindlessImageMode(currentCaller, AstRefTypeVar);
            else
                intermediate.setBindlessTextureMode(currentCaller, AstRefTypeVar);
        }
        else {
            // non-uniform sampler
            // not yet: okay if it has an initializer
            // if (! initializer)
            if (type.getSampler().isAttachmentEXT() && type.getQualifier().storage != EvqTileImageEXT)
                error(loc, "can only be used in tileImageEXT variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
            else if (type.getQualifier().storage != EvqTileImageEXT)
                error(loc, "sampler/image types can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
        }
    }
}

void TParseContext::atomicUintCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
{
    if (type.getQualifier().storage == EvqUniform)
        return;

    if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAtomicUint))
        error(loc, "non-uniform struct contains an atomic_uint:", type.getBasicTypeString().c_str(), identifier.c_str());
    else if (type.getBasicType() == EbtAtomicUint && type.getQualifier().storage != EvqUniform)
        error(loc, "atomic_uints can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
}

void TParseContext::accStructCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
{
    if (type.getQualifier().storage == EvqUniform)
        return;

    if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStruct))
        error(loc, "non-uniform struct contains an accelerationStructureNV:", type.getBasicTypeString().c_str(), identifier.c_str());
    else if (type.getBasicType() == EbtAccStruct && type.getQualifier().storage != EvqUniform)
        error(loc, "accelerationStructureNV can only be used in uniform variables or function parameters:",
            type.getBasicTypeString().c_str(), identifier.c_str());

}

void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
{
    if (parsingBuiltins)
        return;

    if (type.getQualifier().storage != EvqUniform)
        return;

    if (type.containsNonOpaque()) {
        // Vulkan doesn't allow transparent uniforms outside of blocks
        if (spvVersion.vulkan > 0 && !spvVersion.vulkanRelaxed)
            vulkanRemoved(loc, "non-opaque uniforms outside a block");
        // OpenGL wants locations on these (unless they are getting automapped)
        if (spvVersion.openGl > 0 && !type.getQualifier().hasLocation() && !intermediate.getAutoMapLocations())
            error(loc, "non-opaque uniform variables need a layout(location=L)", identifier.c_str(), "");
    }
}

//
// Qualifier checks knowing the qualifier and that it is a member of a struct/block.
//
void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType)
{
    globalQualifierFixCheck(publicType.loc, publicType.qualifier, true);
    checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers);
    if (publicType.qualifier.isNonUniform()) {
        error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", "");
        publicType.qualifier.nonUniform = false;
    }
}

//
3953
// Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level.
3954
//
3955
void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier, bool isMemberCheck, const TPublicType* publicType)
3956
{
3957
bool nonuniformOkay = false;
3958
3959
// move from parameter/unknown qualifiers to pipeline in/out qualifiers
3960
switch (qualifier.storage) {
3961
case EvqIn:
3962
profileRequires(loc, ENoProfile, 130, nullptr, "in for stage inputs");
3963
profileRequires(loc, EEsProfile, 300, nullptr, "in for stage inputs");
3964
qualifier.storage = EvqVaryingIn;
3965
nonuniformOkay = true;
3966
break;
3967
case EvqOut:
3968
profileRequires(loc, ENoProfile, 130, nullptr, "out for stage outputs");
3969
profileRequires(loc, EEsProfile, 300, nullptr, "out for stage outputs");
3970
qualifier.storage = EvqVaryingOut;
3971
if (intermediate.isInvariantAll())
3972
qualifier.invariant = true;
3973
break;
3974
case EvqInOut:
3975
qualifier.storage = EvqVaryingIn;
3976
error(loc, "cannot use 'inout' at global scope", "", "");
3977
break;
3978
case EvqGlobal:
3979
case EvqTemporary:
3980
nonuniformOkay = true;
3981
break;
3982
case EvqUniform:
3983
// According to GLSL spec: The std430 qualifier is supported only for shader storage blocks; a shader using
3984
// the std430 qualifier on a uniform block will fail to compile.
3985
// Only check the global declaration: layout(std430) uniform;
3986
if (blockName == nullptr &&
3987
qualifier.layoutPacking == ElpStd430)
3988
{
3989
requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "default std430 layout for uniform");
3990
}
3991
3992
if (publicType != nullptr && publicType->isImage() &&
3993
(qualifier.layoutFormat > ElfExtSizeGuard && qualifier.layoutFormat < ElfCount))
3994
qualifier.layoutFormat = mapLegacyLayoutFormat(qualifier.layoutFormat, publicType->sampler.getBasicType());
3995
3996
break;
3997
default:
3998
break;
3999
}
4000
4001
if (!nonuniformOkay && qualifier.isNonUniform())
4002
error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", "");
4003
4004
if (qualifier.isSpirvByReference())
4005
error(loc, "can only apply to parameter", "spirv_by_reference", "");
4006
4007
if (qualifier.isSpirvLiteral())
4008
error(loc, "can only apply to parameter", "spirv_literal", "");
4009
4010
    // The storage qualifier isn't ready yet during memberQualifierCheck, so skip invariantCheck for it.
4011
if (!isMemberCheck || structNestingLevel > 0)
4012
invariantCheck(loc, qualifier);
4013
4014
if (qualifier.isFullQuads()) {
4015
if (qualifier.storage != EvqVaryingIn)
4016
error(loc, "can only apply to input layout", "full_quads ", "");
4017
intermediate.setReqFullQuadsMode();
4018
}
4019
4020
if (qualifier.isQuadDeriv()) {
4021
if (qualifier.storage != EvqVaryingIn)
4022
error(loc, "can only apply to input layout", "quad_derivatives", "");
4023
intermediate.setQuadDerivMode();
4024
}
4025
}
4026
4027
//
4028
// Check a full qualifier and type (no variable yet) at global level.
4029
//
4030
void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQualifier& qualifier, const TPublicType& publicType)
4031
{
4032
if (! symbolTable.atGlobalLevel())
4033
return;
4034
4035
if (!(publicType.userDef && publicType.userDef->isReference()) && !parsingBuiltins) {
4036
if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) {
4037
error(loc, "memory qualifiers cannot be used on this type", "", "");
4038
} else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) {
4039
error(loc, "memory qualifiers cannot be used on this type", "", "");
4040
}
4041
}
4042
4043
if (qualifier.storage == EvqBuffer &&
4044
publicType.basicType != EbtBlock &&
4045
!qualifier.hasBufferReference())
4046
error(loc, "buffers can be declared only as blocks", "buffer", "");
4047
4048
if (qualifier.storage != EvqVaryingIn && publicType.basicType == EbtDouble &&
4049
extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit) && language == EShLangVertex &&
4050
version < 400) {
4051
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 410, E_GL_ARB_gpu_shader_fp64, "vertex-shader `double` type");
4052
}
4053
if (qualifier.storage != EvqVaryingIn && qualifier.storage != EvqVaryingOut)
4054
return;
4055
4056
if (publicType.shaderQualifiers.hasBlendEquation())
4057
error(loc, "can only be applied to a standalone 'out'", "blend equation", "");
4058
4059
// now, knowing it is a shader in/out, do all the in/out semantic checks
4060
4061
if (publicType.basicType == EbtBool && !parsingBuiltins) {
4062
error(loc, "cannot be bool", GetStorageQualifierString(qualifier.storage), "");
4063
return;
4064
}
4065
4066
if (isTypeInt(publicType.basicType) || publicType.basicType == EbtDouble) {
4067
profileRequires(loc, EEsProfile, 300, nullptr, "non-float shader input/output");
4068
profileRequires(loc, ~EEsProfile, 130, nullptr, "non-float shader input/output");
4069
}
4070
4071
if (!qualifier.flat && !qualifier.isExplicitInterpolation() && !qualifier.isPervertexNV() && !qualifier.isPervertexEXT()) {
4072
if (isTypeInt(publicType.basicType) ||
4073
publicType.basicType == EbtDouble ||
4074
(publicType.userDef && ( publicType.userDef->containsBasicType(EbtInt)
4075
|| publicType.userDef->containsBasicType(EbtUint)
4076
|| publicType.userDef->contains16BitInt()
4077
|| publicType.userDef->contains8BitInt()
4078
|| publicType.userDef->contains64BitInt()
4079
|| publicType.userDef->containsDouble()))) {
4080
if (qualifier.storage == EvqVaryingIn && language == EShLangFragment)
4081
error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage));
4082
else if (qualifier.storage == EvqVaryingOut && language == EShLangVertex && version == 300)
4083
error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage));
4084
}
4085
}
4086
4087
if (qualifier.isPatch() && qualifier.isInterpolation())
4088
error(loc, "cannot use interpolation qualifiers with patch", "patch", "");
4089
4090
if (qualifier.isTaskPayload() && publicType.basicType == EbtBlock)
4091
error(loc, "taskPayloadSharedEXT variables should not be declared as interface blocks", "taskPayloadSharedEXT", "");
4092
4093
if (qualifier.isTaskMemory() && publicType.basicType != EbtBlock)
4094
error(loc, "taskNV variables can be declared only as blocks", "taskNV", "");
4095
4096
if (qualifier.storage == EvqVaryingIn) {
4097
switch (language) {
4098
case EShLangVertex:
4099
if (publicType.basicType == EbtStruct) {
4100
error(loc, "cannot be a structure", GetStorageQualifierString(qualifier.storage), "");
4101
return;
4102
}
4103
if (publicType.arraySizes) {
4104
requireProfile(loc, ~EEsProfile, "vertex input arrays");
4105
profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
4106
}
4107
if (publicType.basicType == EbtDouble)
4108
profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_vertex_attrib_64bit, "vertex-shader `double` type input");
4109
if (qualifier.isAuxiliary() || qualifier.isInterpolation() || qualifier.isMemory() || qualifier.invariant)
4110
error(loc, "vertex input cannot be further qualified", "", "");
4111
break;
4112
case EShLangFragment:
4113
if (publicType.userDef) {
4114
profileRequires(loc, EEsProfile, 300, nullptr, "fragment-shader struct input");
4115
profileRequires(loc, ~EEsProfile, 150, nullptr, "fragment-shader struct input");
4116
if (publicType.userDef->containsStructure())
4117
requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing structure");
4118
if (publicType.userDef->containsArray())
4119
requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing an array");
4120
}
4121
break;
4122
case EShLangCompute:
4123
if (! symbolTable.atBuiltInLevel())
4124
error(loc, "global storage input qualifier cannot be used in a compute shader", "in", "");
4125
break;
4126
case EShLangTessControl:
4127
if (qualifier.patch)
4128
error(loc, "can only use on output in tessellation-control shader", "patch", "");
4129
break;
4130
default:
4131
break;
4132
}
4133
} else {
4134
// qualifier.storage == EvqVaryingOut
4135
switch (language) {
4136
case EShLangVertex:
4137
if (publicType.userDef) {
4138
profileRequires(loc, EEsProfile, 300, nullptr, "vertex-shader struct output");
4139
profileRequires(loc, ~EEsProfile, 150, nullptr, "vertex-shader struct output");
4140
if (publicType.userDef->containsStructure())
4141
requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing structure");
4142
if (publicType.userDef->containsArray())
4143
requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing an array");
4144
}
4145
4146
break;
4147
case EShLangFragment:
4148
profileRequires(loc, EEsProfile, 300, nullptr, "fragment shader output");
4149
if (publicType.basicType == EbtStruct) {
4150
error(loc, "cannot be a structure", GetStorageQualifierString(qualifier.storage), "");
4151
return;
4152
}
4153
if (publicType.matrixRows > 0) {
4154
error(loc, "cannot be a matrix", GetStorageQualifierString(qualifier.storage), "");
4155
return;
4156
}
4157
if (qualifier.isAuxiliary())
4158
error(loc, "can't use auxiliary qualifier on a fragment output", "centroid/sample/patch", "");
4159
if (qualifier.isInterpolation())
4160
error(loc, "can't use interpolation qualifier on a fragment output", "flat/smooth/noperspective", "");
4161
if (publicType.basicType == EbtDouble || publicType.basicType == EbtInt64 || publicType.basicType == EbtUint64)
4162
error(loc, "cannot contain a double, int64, or uint64", GetStorageQualifierString(qualifier.storage), "");
4163
break;
4164
4165
case EShLangCompute:
4166
error(loc, "global storage output qualifier cannot be used in a compute shader", "out", "");
4167
break;
4168
case EShLangTessEvaluation:
4169
if (qualifier.patch)
4170
error(loc, "can only use on input in tessellation-evaluation shader", "patch", "");
4171
break;
4172
default:
4173
break;
4174
}
4175
}
4176
}
4177
4178
//
4179
// Merge characteristics of the 'src' qualifier into the 'dst'.
4180
// If there is duplication, issue error messages, unless 'force'
4181
// is specified, which means to just override default settings.
4182
//
4183
// Also, when force is false, it will be assumed that 'src' follows
4184
// 'dst', for the purpose of error checking order for versions
4185
// that require specific orderings of qualifiers.
4186
//
4187
void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, const TQualifier& src, bool force)
4188
{
4189
// Multiple auxiliary qualifiers (mostly done later by 'individual qualifiers')
4190
if (src.isAuxiliary() && dst.isAuxiliary())
4191
error(loc, "can only have one auxiliary qualifier (centroid, patch, and sample)", "", "");
4192
4193
// Multiple interpolation qualifiers (mostly done later by 'individual qualifiers')
4194
if (src.isInterpolation() && dst.isInterpolation())
4195
error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective, __explicitInterpAMD)", "", "");
4196
4197
// Ordering
4198
if (! force && ((!isEsProfile() && version < 420) ||
4199
(isEsProfile() && version < 310))
4200
&& ! extensionTurnedOn(E_GL_ARB_shading_language_420pack)) {
4201
// non-function parameters
4202
if (src.isNoContraction() && (dst.invariant || dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
4203
error(loc, "precise qualifier must appear first", "", "");
4204
if (src.invariant && (dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
4205
error(loc, "invariant qualifier must appear before interpolation, storage, and precision qualifiers ", "", "");
4206
else if (src.isInterpolation() && (dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
4207
error(loc, "interpolation qualifiers must appear before storage and precision qualifiers", "", "");
4208
else if (src.isAuxiliary() && (dst.storage != EvqTemporary || dst.precision != EpqNone))
4209
error(loc, "Auxiliary qualifiers (centroid, patch, and sample) must appear before storage and precision qualifiers", "", "");
4210
else if (src.storage != EvqTemporary && (dst.precision != EpqNone))
4211
error(loc, "precision qualifier must appear as last qualifier", "", "");
4212
4213
// function parameters
4214
if (src.isNoContraction() && (dst.storage == EvqConst || dst.storage == EvqIn || dst.storage == EvqOut))
4215
error(loc, "precise qualifier must appear first", "", "");
4216
if (src.storage == EvqConst && (dst.storage == EvqIn || dst.storage == EvqOut))
4217
error(loc, "in/out must appear before const", "", "");
4218
}
4219
4220
// Storage qualification
4221
if (dst.storage == EvqTemporary || dst.storage == EvqGlobal)
4222
dst.storage = src.storage;
4223
else if ((dst.storage == EvqIn && src.storage == EvqOut) ||
4224
(dst.storage == EvqOut && src.storage == EvqIn))
4225
dst.storage = EvqInOut;
4226
else if ((dst.storage == EvqIn && src.storage == EvqConst) ||
4227
(dst.storage == EvqConst && src.storage == EvqIn))
4228
dst.storage = EvqConstReadOnly;
4229
else if (src.storage != EvqTemporary &&
4230
src.storage != EvqGlobal)
4231
error(loc, "too many storage qualifiers", GetStorageQualifierString(src.storage), "");
4232
4233
// Precision qualifiers
4234
if (! force && src.precision != EpqNone && dst.precision != EpqNone)
4235
error(loc, "only one precision qualifier allowed", GetPrecisionQualifierString(src.precision), "");
4236
if (dst.precision == EpqNone || (force && src.precision != EpqNone))
4237
dst.precision = src.precision;
4238
4239
if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
4240
(src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
4241
(src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
4242
(src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent || dst.shadercallcoherent)) ||
4243
(src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.shadercallcoherent)) ||
4244
(src.shadercallcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)))) {
4245
error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent/shadercallcoherent qualifier allowed",
4246
GetPrecisionQualifierString(src.precision), "");
4247
}
4248
4249
// Layout qualifiers
4250
mergeObjectLayoutQualifiers(dst, src, false);
4251
4252
// individual qualifiers
4253
bool repeated = false;
4254
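    // For each boolean qualifier bit: note a repeat if both src and dst already have it set,
    // then OR src's bit into dst.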
#define MERGE_SINGLETON(field) repeated |= dst.field && src.field; dst.field |= src.field;
4255
MERGE_SINGLETON(invariant);
4256
MERGE_SINGLETON(centroid);
4257
MERGE_SINGLETON(smooth);
4258
MERGE_SINGLETON(flat);
4259
MERGE_SINGLETON(specConstant);
4260
MERGE_SINGLETON(noContraction);
4261
MERGE_SINGLETON(nopersp);
4262
MERGE_SINGLETON(explicitInterp);
4263
MERGE_SINGLETON(perPrimitiveNV);
4264
MERGE_SINGLETON(perViewNV);
4265
MERGE_SINGLETON(perTaskNV);
4266
MERGE_SINGLETON(patch);
4267
MERGE_SINGLETON(sample);
4268
MERGE_SINGLETON(coherent);
4269
MERGE_SINGLETON(devicecoherent);
4270
MERGE_SINGLETON(queuefamilycoherent);
4271
MERGE_SINGLETON(workgroupcoherent);
4272
MERGE_SINGLETON(subgroupcoherent);
4273
MERGE_SINGLETON(shadercallcoherent);
4274
MERGE_SINGLETON(nonprivate);
4275
MERGE_SINGLETON(volatil);
4276
MERGE_SINGLETON(restrict);
4277
MERGE_SINGLETON(readonly);
4278
MERGE_SINGLETON(writeonly);
4279
MERGE_SINGLETON(nonUniform);
4280
4281
// SPIR-V storage class qualifier (GL_EXT_spirv_intrinsics)
4282
dst.spirvStorageClass = src.spirvStorageClass;
4283
4284
// SPIR-V decorate qualifiers (GL_EXT_spirv_intrinsics)
4285
if (src.hasSpirvDecorate()) {
4286
if (dst.hasSpirvDecorate()) {
4287
const TSpirvDecorate& srcSpirvDecorate = src.getSpirvDecorate();
4288
TSpirvDecorate& dstSpirvDecorate = dst.getSpirvDecorate();
4289
for (auto& decorate : srcSpirvDecorate.decorates) {
4290
if (dstSpirvDecorate.decorates.find(decorate.first) != dstSpirvDecorate.decorates.end())
4291
error(loc, "too many SPIR-V decorate qualifiers", "spirv_decorate", "(decoration=%u)", decorate.first);
4292
else
4293
dstSpirvDecorate.decorates.insert(decorate);
4294
}
4295
4296
for (auto& decorateId : srcSpirvDecorate.decorateIds) {
4297
if (dstSpirvDecorate.decorateIds.find(decorateId.first) != dstSpirvDecorate.decorateIds.end())
4298
error(loc, "too many SPIR-V decorate qualifiers", "spirv_decorate_id", "(decoration=%u)", decorateId.first);
4299
else
4300
dstSpirvDecorate.decorateIds.insert(decorateId);
4301
}
4302
4303
for (auto& decorateString : srcSpirvDecorate.decorateStrings) {
4304
if (dstSpirvDecorate.decorates.find(decorateString.first) != dstSpirvDecorate.decorates.end())
4305
error(loc, "too many SPIR-V decorate qualifiers", "spirv_decorate_string", "(decoration=%u)", decorateString.first);
4306
else
4307
dstSpirvDecorate.decorateStrings.insert(decorateString);
4308
}
4309
} else {
4310
dst.spirvDecorate = src.spirvDecorate;
4311
}
4312
}
4313
4314
if (repeated)
4315
error(loc, "replicated qualifiers", "", "");
4316
}
4317
4318
void TParseContext::setDefaultPrecision(const TSourceLoc& loc, TPublicType& publicType, TPrecisionQualifier qualifier)
4319
{
4320
TBasicType basicType = publicType.basicType;
4321
4322
if (basicType == EbtSampler) {
4323
defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)] = qualifier;
4324
4325
return; // all is well
4326
}
4327
4328
if (basicType == EbtInt || basicType == EbtFloat) {
4329
if (publicType.isScalar()) {
4330
defaultPrecision[basicType] = qualifier;
4331
if (basicType == EbtInt) {
4332
defaultPrecision[EbtUint] = qualifier;
4333
precisionManager.explicitIntDefaultSeen();
4334
} else
4335
precisionManager.explicitFloatDefaultSeen();
4336
4337
return; // all is well
4338
}
4339
}
4340
4341
if (basicType == EbtAtomicUint) {
4342
if (qualifier != EpqHigh)
4343
error(loc, "can only apply highp to atomic_uint", "precision", "");
4344
4345
return;
4346
}
4347
4348
error(loc, "cannot apply precision statement to this type; use 'float', 'int' or a sampler type", TType::getBasicString(basicType), "");
4349
}
4350
4351
// used to flatten the sampler type space into a single dimension
// correlates with the declaration of defaultSamplerPrecision[]
int TParseContext::computeSamplerTypeIndex(TSampler& sampler)
{
    int arrayIndex = sampler.arrayed ? 1 : 0;
    int shadowIndex = sampler.shadow ? 1 : 0;
    int externalIndex = sampler.isExternal() ? 1 : 0;
    int imageIndex = sampler.isImageClass() ? 1 : 0;
    int msIndex = sampler.isMultiSample() ? 1 : 0;

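    // Pack the independent sampler properties (arrayed, ms, image, shadow, external, base type, dim)
    // into a single mixed-radix index; the nesting below mirrors that packing order.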
    int flattened = EsdNumDims * (EbtNumTypes * (2 * (2 * (2 * (2 * arrayIndex + msIndex) + imageIndex) + shadowIndex) +
                                                 externalIndex) + sampler.type) + sampler.dim;
    assert(flattened < maxSamplerIndex);

    return flattened;
}

TPrecisionQualifier TParseContext::getDefaultPrecision(TPublicType& publicType)
{
    if (publicType.basicType == EbtSampler)
        return defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)];
    else
        return defaultPrecision[publicType.basicType];
}

void TParseContext::precisionQualifierCheck(const TSourceLoc& loc, TBasicType baseType, TQualifier& qualifier, bool isCoopMat)
4377
{
4378
// Built-in symbols are allowed some ambiguous precisions, to be pinned down
4379
// later by context.
4380
if (! obeyPrecisionQualifiers() || parsingBuiltins)
4381
return;
4382
4383
if (baseType == EbtAtomicUint && qualifier.precision != EpqNone && qualifier.precision != EpqHigh)
4384
error(loc, "atomic counters can only be highp", "atomic_uint", "");
4385
4386
if (isCoopMat)
4387
return;
4388
4389
if (baseType == EbtFloat || baseType == EbtUint || baseType == EbtInt || baseType == EbtSampler || baseType == EbtAtomicUint) {
4390
if (qualifier.precision == EpqNone) {
4391
if (relaxedErrors())
4392
warn(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "substituting 'mediump'");
4393
else
4394
error(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "");
4395
qualifier.precision = EpqMedium;
4396
defaultPrecision[baseType] = EpqMedium;
4397
}
4398
} else if (qualifier.precision != EpqNone)
4399
error(loc, "type cannot have precision qualifier", TType::getBasicString(baseType), "");
4400
}
4401
4402
void TParseContext::parameterTypeCheck(const TSourceLoc& loc, TStorageQualifier qualifier, const TType& type)
4403
{
4404
if ((qualifier == EvqOut || qualifier == EvqInOut) && type.isOpaque() && !intermediate.getBindlessMode())
4405
error(loc, "samplers and atomic_uints cannot be output parameters", type.getBasicTypeString().c_str(), "");
4406
if (!parsingBuiltins && type.contains16BitFloat())
4407
requireFloat16Arithmetic(loc, type.getBasicTypeString().c_str(), "float16 types can only be in uniform block or buffer storage");
4408
if (!parsingBuiltins && type.contains16BitInt())
4409
requireInt16Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int16 types can only be in uniform block or buffer storage");
4410
if (!parsingBuiltins && type.contains8BitInt())
4411
requireInt8Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int8 types can only be in uniform block or buffer storage");
4412
}
4413
4414
bool TParseContext::containsFieldWithBasicType(const TType& type, TBasicType basicType)
4415
{
4416
if (type.getBasicType() == basicType)
4417
return true;
4418
4419
if (type.getBasicType() == EbtStruct) {
4420
const TTypeList& structure = *type.getStruct();
4421
for (unsigned int i = 0; i < structure.size(); ++i) {
4422
if (containsFieldWithBasicType(*structure[i].type, basicType))
4423
return true;
4424
}
4425
}
4426
4427
return false;
4428
}
4429
4430
//
4431
// Do size checking for an array type's size.
4432
//
4433
void TParseContext::arraySizeCheck(const TSourceLoc& loc, TIntermTyped* expr, TArraySize& sizePair,
4434
const char* sizeType, const bool allowZero)
4435
{
4436
bool isConst = false;
4437
sizePair.node = nullptr;
4438
4439
int size = 1;
4440
4441
TIntermConstantUnion* constant = expr->getAsConstantUnion();
4442
if (constant) {
4443
// handle true (non-specialization) constant
4444
size = constant->getConstArray()[0].getIConst();
4445
isConst = true;
4446
} else {
4447
// see if it's a specialization constant instead
4448
if (expr->getQualifier().isSpecConstant()) {
4449
isConst = true;
4450
sizePair.node = expr;
4451
TIntermSymbol* symbol = expr->getAsSymbolNode();
4452
if (symbol && symbol->getConstArray().size() > 0)
4453
size = symbol->getConstArray()[0].getIConst();
4454
} else if (expr->getAsUnaryNode() && expr->getAsUnaryNode()->getOp() == glslang::EOpArrayLength &&
4455
expr->getAsUnaryNode()->getOperand()->getType().isCoopMatNV()) {
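            // Also treat .length() of a cooperative matrix as a constant size,
            // keeping the node (as is done for a specialization constant).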
4456
isConst = true;
4457
size = 1;
4458
sizePair.node = expr->getAsUnaryNode();
4459
}
4460
}
4461
4462
sizePair.size = size;
4463
4464
if (! isConst || (expr->getBasicType() != EbtInt && expr->getBasicType() != EbtUint)) {
4465
error(loc, sizeType, "", "must be a constant integer expression");
4466
return;
4467
}
4468
4469
if (allowZero) {
4470
if (size < 0) {
4471
error(loc, sizeType, "", "must be a non-negative integer");
4472
return;
4473
}
4474
} else {
4475
if (size <= 0) {
4476
error(loc, sizeType, "", "must be a positive integer");
4477
return;
4478
}
4479
}
4480
}
4481
4482
//
4483
// See if this qualifier can be an array.
4484
//
4485
// Returns true if there is an error.
4486
//
4487
bool TParseContext::arrayQualifierError(const TSourceLoc& loc, const TQualifier& qualifier)
4488
{
4489
if (qualifier.storage == EvqConst) {
4490
profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "const array");
4491
profileRequires(loc, EEsProfile, 300, nullptr, "const array");
4492
}
4493
4494
if (qualifier.storage == EvqVaryingIn && language == EShLangVertex) {
4495
requireProfile(loc, ~EEsProfile, "vertex input arrays");
4496
profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
4497
}
4498
4499
return false;
4500
}
4501
4502
//
4503
// See if this qualifier and type combination can be an array.
4504
// Assumes arrayQualifierError() was also called to catch the type-invariant tests.
4505
//
4506
// Returns true if there is an error.
4507
//
4508
bool TParseContext::arrayError(const TSourceLoc& loc, const TType& type)
4509
{
4510
if (type.getQualifier().storage == EvqVaryingOut && language == EShLangVertex) {
4511
if (type.isArrayOfArrays())
4512
requireProfile(loc, ~EEsProfile, "vertex-shader array-of-array output");
4513
else if (type.isStruct())
4514
requireProfile(loc, ~EEsProfile, "vertex-shader array-of-struct output");
4515
}
4516
if (type.getQualifier().storage == EvqVaryingIn && language == EShLangFragment) {
4517
if (type.isArrayOfArrays())
4518
requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array input");
4519
else if (type.isStruct())
4520
requireProfile(loc, ~EEsProfile, "fragment-shader array-of-struct input");
4521
}
4522
if (type.getQualifier().storage == EvqVaryingOut && language == EShLangFragment) {
4523
if (type.isArrayOfArrays())
4524
requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array output");
4525
}
4526
4527
return false;
4528
}
4529
4530
//
4531
// Require array to be completely sized
4532
//
4533
void TParseContext::arraySizeRequiredCheck(const TSourceLoc& loc, const TArraySizes& arraySizes)
4534
{
4535
if (!parsingBuiltins && arraySizes.hasUnsized())
4536
error(loc, "array size required", "", "");
4537
}
4538
4539
void TParseContext::structArrayCheck(const TSourceLoc& /*loc*/, const TType& type)
4540
{
4541
const TTypeList& structure = *type.getStruct();
4542
for (int m = 0; m < (int)structure.size(); ++m) {
4543
const TType& member = *structure[m].type;
4544
if (member.isArray())
4545
arraySizeRequiredCheck(structure[m].loc, *member.getArraySizes());
4546
}
4547
}
4548
4549
void TParseContext::arraySizesCheck(const TSourceLoc& loc, const TQualifier& qualifier, TArraySizes* arraySizes,
4550
const TIntermTyped* initializer, bool lastMember)
4551
{
4552
assert(arraySizes);
4553
4554
// always allow special built-in ins/outs sized to topologies
4555
if (parsingBuiltins)
4556
return;
4557
4558
// initializer must be a sized array, in which case
4559
// allow the initializer to set any unknown array sizes
4560
if (initializer != nullptr) {
4561
if (initializer->getType().isUnsizedArray())
4562
error(loc, "array initializer must be sized", "[]", "");
4563
return;
4564
}
4565
4566
// No environment allows any non-outer-dimension to be implicitly sized
4567
if (arraySizes->isInnerUnsized()) {
4568
error(loc, "only outermost dimension of an array of arrays can be implicitly sized", "[]", "");
4569
arraySizes->clearInnerUnsized();
4570
}
4571
4572
if (arraySizes->isInnerSpecialization() &&
4573
(qualifier.storage != EvqTemporary && qualifier.storage != EvqGlobal && qualifier.storage != EvqShared && qualifier.storage != EvqConst))
4574
error(loc, "only outermost dimension of an array of arrays can be a specialization constant", "[]", "");
4575
4576
// desktop always allows outer-dimension-unsized variable arrays,
4577
if (!isEsProfile())
4578
return;
4579
4580
// for ES, if size isn't coming from an initializer, it has to be explicitly declared now,
4581
// with very few exceptions
4582
4583
// implicitly-sized io exceptions:
4584
switch (language) {
4585
case EShLangGeometry:
4586
if (qualifier.storage == EvqVaryingIn)
4587
if ((isEsProfile() && version >= 320) ||
4588
extensionsTurnedOn(Num_AEP_geometry_shader, AEP_geometry_shader))
4589
return;
4590
break;
4591
case EShLangTessControl:
4592
if ( qualifier.storage == EvqVaryingIn ||
4593
(qualifier.storage == EvqVaryingOut && ! qualifier.isPatch()))
4594
if ((isEsProfile() && version >= 320) ||
4595
extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
4596
return;
4597
break;
4598
case EShLangTessEvaluation:
4599
if ((qualifier.storage == EvqVaryingIn && ! qualifier.isPatch()) ||
4600
qualifier.storage == EvqVaryingOut)
4601
if ((isEsProfile() && version >= 320) ||
4602
extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
4603
return;
4604
break;
4605
case EShLangMesh:
4606
if (qualifier.storage == EvqVaryingOut)
4607
if ((isEsProfile() && version >= 320) ||
4608
extensionsTurnedOn(Num_AEP_mesh_shader, AEP_mesh_shader))
4609
return;
4610
break;
4611
default:
4612
break;
4613
}
4614
4615
// last member of ssbo block exception:
4616
if (qualifier.storage == EvqBuffer && lastMember)
4617
return;
4618
4619
arraySizeRequiredCheck(loc, *arraySizes);
4620
}
4621
4622
void TParseContext::arrayOfArrayVersionCheck(const TSourceLoc& loc, const TArraySizes* sizes)
4623
{
4624
if (sizes == nullptr || sizes->getNumDims() == 1)
4625
return;
4626
4627
const char* feature = "arrays of arrays";
4628
4629
requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
4630
profileRequires(loc, EEsProfile, 310, nullptr, feature);
4631
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature);
4632
}
4633
4634
//
4635
// Do all the semantic checking for declaring or redeclaring an array, with and
4636
// without a size, and make the right changes to the symbol table.
4637
//
4638
void TParseContext::declareArray(const TSourceLoc& loc, const TString& identifier, const TType& type, TSymbol*& symbol)
4639
{
4640
if (symbol == nullptr) {
4641
bool currentScope;
4642
symbol = symbolTable.find(identifier, nullptr, &currentScope);
4643
4644
if (symbol && builtInName(identifier) && ! symbolTable.atBuiltInLevel()) {
4645
// bad shader (errors already reported) trying to redeclare a built-in name as an array
4646
symbol = nullptr;
4647
return;
4648
}
4649
if (symbol == nullptr || ! currentScope) {
4650
//
4651
// Successfully process a new definition.
4652
// (Redeclarations have to take place at the same scope; otherwise they are hiding declarations)
4653
//
4654
symbol = new TVariable(&identifier, type);
4655
symbolTable.insert(*symbol);
4656
if (symbolTable.atGlobalLevel())
4657
trackLinkage(*symbol);
4658
4659
if (! symbolTable.atBuiltInLevel()) {
4660
if (isIoResizeArray(type)) {
4661
ioArraySymbolResizeList.push_back(symbol);
4662
checkIoArraysConsistency(loc, true);
4663
} else
4664
fixIoArraySize(loc, symbol->getWritableType());
4665
}
4666
4667
return;
4668
}
4669
if (symbol->getAsAnonMember()) {
4670
error(loc, "cannot redeclare a user-block member array", identifier.c_str(), "");
4671
symbol = nullptr;
4672
return;
4673
}
4674
}
4675
4676
//
4677
// Process a redeclaration.
4678
//
4679
4680
if (symbol == nullptr) {
4681
error(loc, "array variable name expected", identifier.c_str(), "");
4682
return;
4683
}
4684
4685
// redeclareBuiltinVariable() should have already done the copyUp()
4686
TType& existingType = symbol->getWritableType();
4687
4688
if (! existingType.isArray()) {
4689
error(loc, "redeclaring non-array as array", identifier.c_str(), "");
4690
return;
4691
}
4692
4693
if (! existingType.sameElementType(type)) {
4694
error(loc, "redeclaration of array with a different element type", identifier.c_str(), "");
4695
return;
4696
}
4697
4698
if (! existingType.sameInnerArrayness(type)) {
4699
error(loc, "redeclaration of array with a different array dimensions or sizes", identifier.c_str(), "");
4700
return;
4701
}
4702
4703
if (existingType.isSizedArray()) {
4704
        // be more lenient for input arrays to geometry shaders and tessellation control outputs, where the redeclaration is the same size
4705
if (! (isIoResizeArray(type) && existingType.getOuterArraySize() == type.getOuterArraySize()))
4706
error(loc, "redeclaration of array with size", identifier.c_str(), "");
4707
return;
4708
}
4709
4710
arrayLimitCheck(loc, identifier, type.getOuterArraySize());
4711
4712
existingType.updateArraySizes(type);
4713
4714
if (isIoResizeArray(type))
4715
checkIoArraysConsistency(loc);
4716
}
4717
4718
// Policy and error check for needing a runtime sized array.
4719
void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermTyped& base)
4720
{
4721
// runtime length implies runtime sizeable, so no problem
4722
if (isRuntimeLength(base))
4723
return;
4724
4725
if (base.getType().getQualifier().builtIn == EbvSampleMask)
4726
return;
4727
4728
    // Check for the last member of a buffer reference type, which is runtime sizeable
4729
// but doesn't support runtime length
4730
if (base.getType().getQualifier().storage == EvqBuffer) {
4731
const TIntermBinary* binary = base.getAsBinaryNode();
4732
if (binary != nullptr &&
4733
binary->getOp() == EOpIndexDirectStruct &&
4734
binary->getLeft()->isReference()) {
4735
4736
const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
4737
const int memberCount = (int)binary->getLeft()->getType().getReferentType()->getStruct()->size();
4738
if (index == memberCount - 1)
4739
return;
4740
}
4741
}
4742
4743
// check for additional things allowed by GL_EXT_nonuniform_qualifier
4744
if (base.getBasicType() == EbtSampler || base.getBasicType() == EbtAccStruct || base.getBasicType() == EbtRayQuery ||
4745
base.getBasicType() == EbtHitObjectNV || (base.getBasicType() == EbtBlock && base.getType().getQualifier().isUniformOrBuffer()))
4746
requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "variable index");
4747
else
4748
error(loc, "", "[", "array must be redeclared with a size before being indexed with a variable");
4749
}
4750
4751
// Policy decision for whether a run-time .length() is allowed.
4752
bool TParseContext::isRuntimeLength(const TIntermTyped& base) const
4753
{
4754
if (base.getType().getQualifier().storage == EvqBuffer) {
4755
// in a buffer block
4756
const TIntermBinary* binary = base.getAsBinaryNode();
4757
if (binary != nullptr && binary->getOp() == EOpIndexDirectStruct) {
4758
// is it the last member?
4759
const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
4760
4761
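            // A buffer reference block is runtime sizeable, but a runtime .length()
            // on its last member is not supported (see checkRuntimeSizable above).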
if (binary->getLeft()->isReference())
4762
return false;
4763
4764
const int memberCount = (int)binary->getLeft()->getType().getStruct()->size();
4765
if (index == memberCount - 1)
4766
return true;
4767
}
4768
}
4769
4770
return false;
4771
}
4772
4773
// Check if mesh perviewNV attributes have a view dimension
4774
// and resize it to gl_MaxMeshViewCountNV when implicitly sized.
4775
void TParseContext::checkAndResizeMeshViewDim(const TSourceLoc& loc, TType& type, bool isBlockMember)
4776
{
4777
// see if member is a per-view attribute
4778
if (!type.getQualifier().isPerView())
4779
return;
4780
4781
if ((isBlockMember && type.isArray()) || (!isBlockMember && type.isArrayOfArrays())) {
4782
// since we don't have the maxMeshViewCountNV set during parsing builtins, we hardcode the value.
4783
int maxViewCount = parsingBuiltins ? 4 : resources.maxMeshViewCountNV;
4784
// For block members, outermost array dimension is the view dimension.
4785
// For non-block members, outermost array dimension is the vertex/primitive dimension
4786
// and 2nd outermost is the view dimension.
4787
int viewDim = isBlockMember ? 0 : 1;
4788
int viewDimSize = type.getArraySizes()->getDimSize(viewDim);
4789
4790
if (viewDimSize != UnsizedArraySize && viewDimSize != maxViewCount)
4791
error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
4792
else if (viewDimSize == UnsizedArraySize)
4793
type.getArraySizes()->setDimSize(viewDim, maxViewCount);
4794
}
4795
else {
4796
error(loc, "requires a view array dimension", "perviewNV", "");
4797
}
4798
}
4799
4800
// Returns true if the first argument to the #line directive is the line number for the next line.
4801
//
4802
// Desktop, pre-version 3.30: "After processing this directive
4803
// (including its new-line), the implementation will behave as if it is compiling at line number line+1 and
4804
// source string number source-string-number."
4805
//
4806
// Desktop, version 3.30 and later, and ES: "After processing this directive
4807
// (including its new-line), the implementation will behave as if it is compiling at line number line and
4808
// source string number source-string-number.
4809
bool TParseContext::lineDirectiveShouldSetNextLine() const
4810
{
4811
return isEsProfile() || version >= 330;
4812
}
4813
4814
//
4815
// Enforce non-initializer type/qualifier rules.
4816
//
4817
void TParseContext::nonInitConstCheck(const TSourceLoc& loc, TString& identifier, TType& type)
4818
{
4819
//
4820
// Make the qualifier make sense, given that there is not an initializer.
4821
//
4822
if (type.getQualifier().storage == EvqConst ||
4823
type.getQualifier().storage == EvqConstReadOnly) {
4824
type.getQualifier().makeTemporary();
4825
error(loc, "variables with qualifier 'const' must be initialized", identifier.c_str(), "");
4826
}
4827
}
4828
4829
//
4830
// See if the identifier is a built-in symbol that can be redeclared, and if so,
4831
// copy the symbol table's read-only built-in variable to the current
4832
// global level, where it can be modified based on the passed in type.
4833
//
4834
// Returns nullptr if no redeclaration took place; meaning a normal declaration still
4835
// needs to occur for it, not necessarily an error.
4836
//
4837
// Returns a redeclared and type-modified variable if a redeclaration occurred.
4838
//
4839
TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TString& identifier,
4840
const TQualifier& qualifier, const TShaderQualifiers& publicType)
4841
{
4842
if (! builtInName(identifier) || symbolTable.atBuiltInLevel() || ! symbolTable.atGlobalLevel())
4843
return nullptr;
4844
4845
bool nonEsRedecls = (!isEsProfile() && (version >= 130 || identifier == "gl_TexCoord"));
4846
bool esRedecls = (isEsProfile() &&
4847
(version >= 320 || extensionsTurnedOn(Num_AEP_shader_io_blocks, AEP_shader_io_blocks)));
4848
if (! esRedecls && ! nonEsRedecls)
4849
return nullptr;
4850
4851
// Special case when using GL_ARB_separate_shader_objects
4852
bool ssoPre150 = false; // means the only reason this variable is redeclared is due to this combination
4853
if (!isEsProfile() && version <= 140 && extensionTurnedOn(E_GL_ARB_separate_shader_objects)) {
4854
if (identifier == "gl_Position" ||
4855
identifier == "gl_PointSize" ||
4856
identifier == "gl_ClipVertex" ||
4857
identifier == "gl_FogFragCoord")
4858
ssoPre150 = true;
4859
}
4860
4861
// Potentially redeclaring a built-in variable...
4862
4863
if (ssoPre150 ||
4864
(identifier == "gl_FragDepth" && ((nonEsRedecls && version >= 420) || esRedecls)) ||
4865
(identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 140) || esRedecls)) ||
4866
identifier == "gl_ClipDistance" ||
4867
identifier == "gl_CullDistance" ||
4868
identifier == "gl_ShadingRateEXT" ||
4869
identifier == "gl_PrimitiveShadingRateEXT" ||
4870
identifier == "gl_FrontColor" ||
4871
identifier == "gl_BackColor" ||
4872
identifier == "gl_FrontSecondaryColor" ||
4873
identifier == "gl_BackSecondaryColor" ||
4874
identifier == "gl_SecondaryColor" ||
4875
(identifier == "gl_Color" && language == EShLangFragment) ||
4876
(identifier == "gl_FragStencilRefARB" && (nonEsRedecls && version >= 140)
4877
&& language == EShLangFragment) ||
4878
identifier == "gl_SampleMask" ||
4879
identifier == "gl_Layer" ||
4880
identifier == "gl_PrimitiveIndicesNV" ||
4881
identifier == "gl_PrimitivePointIndicesEXT" ||
4882
identifier == "gl_PrimitiveLineIndicesEXT" ||
4883
identifier == "gl_PrimitiveTriangleIndicesEXT" ||
4884
identifier == "gl_TexCoord") {
4885
4886
// Find the existing symbol, if any.
4887
bool builtIn;
4888
TSymbol* symbol = symbolTable.find(identifier, &builtIn);
4889
4890
// If the symbol was not found, this must be a version/profile/stage
4891
// that doesn't have it.
4892
if (! symbol)
4893
return nullptr;
4894
4895
// If it wasn't at a built-in level, then it's already been redeclared;
4896
// that is, this is a redeclaration of a redeclaration; reuse that initial
4897
// redeclaration. Otherwise, make the new one.
4898
if (builtIn) {
4899
makeEditable(symbol);
4900
symbolTable.amendSymbolIdLevel(*symbol);
4901
}
4902
4903
// Now, modify the type of the copy, as per the type of the current redeclaration.
4904
4905
TQualifier& symbolQualifier = symbol->getWritableType().getQualifier();
4906
if (ssoPre150) {
4907
if (intermediate.inIoAccessed(identifier))
4908
error(loc, "cannot redeclare after use", identifier.c_str(), "");
4909
if (qualifier.hasLayout())
4910
error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
4911
if (qualifier.isMemory() || qualifier.isAuxiliary() || (language == EShLangVertex && qualifier.storage != EvqVaryingOut) ||
4912
(language == EShLangFragment && qualifier.storage != EvqVaryingIn))
4913
error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str());
4914
if (! qualifier.smooth)
4915
error(loc, "cannot change interpolation qualification of", "redeclaration", symbol->getName().c_str());
4916
} else if (identifier == "gl_FrontColor" ||
4917
identifier == "gl_BackColor" ||
4918
identifier == "gl_FrontSecondaryColor" ||
4919
identifier == "gl_BackSecondaryColor" ||
4920
identifier == "gl_SecondaryColor" ||
4921
identifier == "gl_Color") {
4922
symbolQualifier.flat = qualifier.flat;
4923
symbolQualifier.smooth = qualifier.smooth;
4924
symbolQualifier.nopersp = qualifier.nopersp;
4925
if (qualifier.hasLayout())
4926
error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
4927
if (qualifier.isMemory() || qualifier.isAuxiliary() || symbol->getType().getQualifier().storage != qualifier.storage)
4928
error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str());
4929
} else if (identifier == "gl_TexCoord" ||
4930
identifier == "gl_ClipDistance" ||
4931
identifier == "gl_CullDistance") {
4932
if (qualifier.hasLayout() || qualifier.isMemory() || qualifier.isAuxiliary() ||
4933
qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
4934
symbolQualifier.storage != qualifier.storage)
4935
error(loc, "cannot change qualification of", "redeclaration", symbol->getName().c_str());
4936
} else if (identifier == "gl_FragCoord") {
4937
if (!intermediate.getTexCoordRedeclared() && intermediate.inIoAccessed("gl_FragCoord"))
4938
error(loc, "cannot redeclare after use", "gl_FragCoord", "");
4939
if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
4940
qualifier.isMemory() || qualifier.isAuxiliary())
4941
error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
4942
if (qualifier.storage != EvqVaryingIn)
4943
error(loc, "cannot change input storage qualification of", "redeclaration", symbol->getName().c_str());
4944
if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() ||
4945
publicType.originUpperLeft != intermediate.getOriginUpperLeft()))
4946
error(loc, "cannot redeclare with different qualification:", "redeclaration", symbol->getName().c_str());
4947
4948
4949
intermediate.setTexCoordRedeclared();
4950
if (publicType.pixelCenterInteger)
4951
intermediate.setPixelCenterInteger();
4952
if (publicType.originUpperLeft)
4953
intermediate.setOriginUpperLeft();
4954
} else if (identifier == "gl_FragDepth") {
4955
if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
4956
qualifier.isMemory() || qualifier.isAuxiliary())
4957
error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
4958
if (qualifier.storage != EvqVaryingOut)
4959
error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
4960
if (publicType.layoutDepth != EldNone) {
4961
if (intermediate.inIoAccessed("gl_FragDepth"))
4962
error(loc, "cannot redeclare after use", "gl_FragDepth", "");
4963
if (! intermediate.setDepth(publicType.layoutDepth))
4964
error(loc, "all redeclarations must use the same depth layout on", "redeclaration", symbol->getName().c_str());
4965
}
4966
} else if (identifier == "gl_FragStencilRefARB") {
4967
if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
4968
qualifier.isMemory() || qualifier.isAuxiliary())
4969
error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
4970
if (qualifier.storage != EvqVaryingOut)
4971
error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
4972
if (publicType.layoutStencil != ElsNone) {
4973
if (intermediate.inIoAccessed("gl_FragStencilRefARB"))
4974
error(loc, "cannot redeclare after use", "gl_FragStencilRefARB", "");
4975
if (!intermediate.setStencil(publicType.layoutStencil))
4976
error(loc, "all redeclarations must use the same stencil layout on", "redeclaration",
4977
symbol->getName().c_str());
4978
}
4979
}
4980
else if (
4981
identifier == "gl_PrimitiveIndicesNV") {
4982
if (qualifier.hasLayout())
4983
error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
4984
if (qualifier.storage != EvqVaryingOut)
4985
error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
4986
}
4987
else if (identifier == "gl_SampleMask") {
4988
if (!publicType.layoutOverrideCoverage) {
4989
error(loc, "redeclaration only allowed for override_coverage layout", "redeclaration", symbol->getName().c_str());
4990
}
4991
intermediate.setLayoutOverrideCoverage();
4992
}
4993
else if (identifier == "gl_Layer") {
4994
if (!qualifier.layoutViewportRelative && qualifier.layoutSecondaryViewportRelativeOffset == -2048)
4995
error(loc, "redeclaration only allowed for viewport_relative or secondary_view_offset layout", "redeclaration", symbol->getName().c_str());
4996
symbolQualifier.layoutViewportRelative = qualifier.layoutViewportRelative;
4997
symbolQualifier.layoutSecondaryViewportRelativeOffset = qualifier.layoutSecondaryViewportRelativeOffset;
4998
}
4999
5000
// TODO: semantics quality: separate smooth from nothing declared, then use IsInterpolation for several tests above
5001
5002
return symbol;
5003
}
5004
5005
return nullptr;
5006
}
5007
5008
//
5009
// Either redeclare the requested block, or give an error message why it can't be done.
5010
//
5011
// TODO: functionality: explicitly sizing members of redeclared blocks is not giving them an explicit size
5012
void TParseContext::redeclareBuiltinBlock(const TSourceLoc& loc, TTypeList& newTypeList, const TString& blockName,
5013
const TString* instanceName, TArraySizes* arraySizes)
5014
{
5015
const char* feature = "built-in block redeclaration";
5016
profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
5017
profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
5018
5019
if (blockName != "gl_PerVertex" && blockName != "gl_PerFragment" &&
5020
blockName != "gl_MeshPerVertexNV" && blockName != "gl_MeshPerPrimitiveNV" &&
5021
blockName != "gl_MeshPerVertexEXT" && blockName != "gl_MeshPerPrimitiveEXT") {
5022
error(loc, "cannot redeclare block: ", "block declaration", blockName.c_str());
5023
return;
5024
}
5025
5026
// Redeclaring a built-in block...
5027
5028
if (instanceName && ! builtInName(*instanceName)) {
5029
error(loc, "cannot redeclare a built-in block with a user name", instanceName->c_str(), "");
5030
return;
5031
}
5032
5033
    // Blocks with instance names are easy to find: look up the instance name.
5034
// Anonymous blocks need to be found via a member.
5035
bool builtIn;
5036
TSymbol* block;
5037
if (instanceName)
5038
block = symbolTable.find(*instanceName, &builtIn);
5039
else
5040
block = symbolTable.find(newTypeList.front().type->getFieldName(), &builtIn);
5041
5042
// If the block was not found, this must be a version/profile/stage
5043
// that doesn't have it, or the instance name is wrong.
5044
const char* errorName = instanceName ? instanceName->c_str() : newTypeList.front().type->getFieldName().c_str();
5045
if (! block) {
5046
error(loc, "no declaration found for redeclaration", errorName, "");
5047
return;
5048
}
5049
    // Built-in blocks cannot be redeclared more than once; if that had happened,
    // we'd be finding the already redeclared one here, rather than the built-in.
5051
if (! builtIn) {
5052
error(loc, "can only redeclare a built-in block once, and before any use", blockName.c_str(), "");
5053
return;
5054
}
5055
5056
// Copy the block to make a writable version, to insert into the block table after editing.
5057
block = symbolTable.copyUpDeferredInsert(block);
5058
5059
if (block->getType().getBasicType() != EbtBlock) {
5060
error(loc, "cannot redeclare a non block as a block", errorName, "");
5061
return;
5062
}
5063
5064
    // Fix XFB stuff up; it applies to the order of the redeclaration, not
5065
// the order of the original members.
5066
if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) {
5067
if (!currentBlockQualifier.hasXfbBuffer())
5068
currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
5069
if (!currentBlockQualifier.hasStream())
5070
currentBlockQualifier.layoutStream = globalOutputDefaults.layoutStream;
5071
fixXfbOffsets(currentBlockQualifier, newTypeList);
5072
}
5073
5074
// Edit and error check the container against the redeclaration
5075
// - remove unused members
5076
// - ensure remaining qualifiers/types match
5077
5078
TType& type = block->getWritableType();
5079
5080
    // If gl_PerVertex is redeclared for the purpose of passing through "gl_Position",
    // the redeclared block should have the same qualifiers as the current one.
5083
if (currentBlockQualifier.layoutPassthrough) {
5084
type.getQualifier().layoutPassthrough = currentBlockQualifier.layoutPassthrough;
5085
type.getQualifier().storage = currentBlockQualifier.storage;
5086
type.getQualifier().layoutStream = currentBlockQualifier.layoutStream;
5087
type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
5088
}
5089
5090
TTypeList::iterator member = type.getWritableStruct()->begin();
5091
size_t numOriginalMembersFound = 0;
5092
while (member != type.getStruct()->end()) {
5093
// look for match
5094
bool found = false;
5095
TTypeList::const_iterator newMember;
5096
TSourceLoc memberLoc;
5097
memberLoc.init();
5098
for (newMember = newTypeList.begin(); newMember != newTypeList.end(); ++newMember) {
5099
if (member->type->getFieldName() == newMember->type->getFieldName()) {
5100
found = true;
5101
memberLoc = newMember->loc;
5102
break;
5103
}
5104
}
5105
5106
if (found) {
5107
++numOriginalMembersFound;
5108
// - ensure match between redeclared members' types
5109
// - check for things that can't be changed
5110
// - update things that can be changed
5111
TType& oldType = *member->type;
5112
const TType& newType = *newMember->type;
5113
if (! newType.sameElementType(oldType))
5114
error(memberLoc, "cannot redeclare block member with a different type", member->type->getFieldName().c_str(), "");
5115
if (oldType.isArray() != newType.isArray())
5116
error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), "");
5117
else if (! oldType.getQualifier().isPerView() && ! oldType.sameArrayness(newType) && oldType.isSizedArray())
5118
error(memberLoc, "cannot change array size of redeclared block member", member->type->getFieldName().c_str(), "");
5119
else if (! oldType.getQualifier().isPerView() && newType.isArray())
5120
arrayLimitCheck(loc, member->type->getFieldName(), newType.getOuterArraySize());
5121
if (oldType.getQualifier().isPerView() && ! newType.getQualifier().isPerView())
5122
error(memberLoc, "missing perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
5123
else if (! oldType.getQualifier().isPerView() && newType.getQualifier().isPerView())
5124
error(memberLoc, "cannot add perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
5125
else if (newType.getQualifier().isPerView()) {
5126
if (oldType.getArraySizes()->getNumDims() != newType.getArraySizes()->getNumDims())
5127
error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), "");
5128
else if (! newType.isUnsizedArray() && newType.getOuterArraySize() != resources.maxMeshViewCountNV)
5129
error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
5130
else if (newType.getArraySizes()->getNumDims() == 2) {
5131
int innerDimSize = newType.getArraySizes()->getDimSize(1);
5132
arrayLimitCheck(memberLoc, member->type->getFieldName(), innerDimSize);
5133
oldType.getArraySizes()->setDimSize(1, innerDimSize);
5134
}
5135
}
5136
if (oldType.getQualifier().isPerPrimitive() && ! newType.getQualifier().isPerPrimitive())
5137
error(memberLoc, "missing perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
5138
else if (! oldType.getQualifier().isPerPrimitive() && newType.getQualifier().isPerPrimitive())
5139
error(memberLoc, "cannot add perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
5140
if (newType.getQualifier().isMemory())
5141
error(memberLoc, "cannot add memory qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
5142
if (newType.getQualifier().hasNonXfbLayout())
5143
error(memberLoc, "cannot add non-XFB layout to redeclared block member", member->type->getFieldName().c_str(), "");
5144
if (newType.getQualifier().patch)
5145
error(memberLoc, "cannot add patch to redeclared block member", member->type->getFieldName().c_str(), "");
5146
if (newType.getQualifier().hasXfbBuffer() &&
5147
newType.getQualifier().layoutXfbBuffer != currentBlockQualifier.layoutXfbBuffer)
5148
error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
5149
if (newType.getQualifier().hasStream() &&
5150
newType.getQualifier().layoutStream != currentBlockQualifier.layoutStream)
5151
error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_stream", "");
5152
oldType.getQualifier().centroid = newType.getQualifier().centroid;
5153
oldType.getQualifier().sample = newType.getQualifier().sample;
5154
oldType.getQualifier().invariant = newType.getQualifier().invariant;
5155
oldType.getQualifier().noContraction = newType.getQualifier().noContraction;
5156
oldType.getQualifier().smooth = newType.getQualifier().smooth;
5157
oldType.getQualifier().flat = newType.getQualifier().flat;
5158
oldType.getQualifier().nopersp = newType.getQualifier().nopersp;
5159
oldType.getQualifier().layoutXfbOffset = newType.getQualifier().layoutXfbOffset;
5160
oldType.getQualifier().layoutXfbBuffer = newType.getQualifier().layoutXfbBuffer;
5161
oldType.getQualifier().layoutXfbStride = newType.getQualifier().layoutXfbStride;
5162
if (oldType.getQualifier().layoutXfbOffset != TQualifier::layoutXfbBufferEnd) {
5163
// If any member has an xfb_offset, then the block's xfb_buffer inherits the current xfb_buffer,
5164
// and for xfb processing, the member needs it as well, along with xfb_stride.
5165
type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
5166
oldType.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
5167
}
5168
if (oldType.isUnsizedArray() && newType.isSizedArray())
5169
oldType.changeOuterArraySize(newType.getOuterArraySize());
5170
5171
// check and process the member's type, which will include managing xfb information
5172
layoutTypeCheck(loc, oldType);
5173
5174
// go to next member
5175
++member;
5176
} else {
5177
// For missing members of anonymous blocks that have been redeclared,
5178
// hide the original (shared) declaration.
5179
// Instance-named blocks can just have the member removed.
5180
if (instanceName)
5181
member = type.getWritableStruct()->erase(member);
5182
else {
5183
member->type->hideMember();
5184
++member;
5185
}
5186
}
5187
}
5188
5189
if (spvVersion.vulkan > 0) {
5190
// ...then streams apply to built-in blocks, instead of them being only on stream 0
5191
type.getQualifier().layoutStream = currentBlockQualifier.layoutStream;
5192
}
5193
5194
if (numOriginalMembersFound < newTypeList.size())
5195
error(loc, "block redeclaration has extra members", blockName.c_str(), "");
5196
if (type.isArray() != (arraySizes != nullptr) ||
5197
(type.isArray() && arraySizes != nullptr && type.getArraySizes()->getNumDims() != arraySizes->getNumDims()))
5198
error(loc, "cannot change arrayness of redeclared block", blockName.c_str(), "");
5199
else if (type.isArray()) {
5200
// At this point, we know both are arrays and both have the same number of dimensions.
5201
5202
// It is okay for a built-in block redeclaration to be unsized, and keep the size of the
5203
// original block declaration.
5204
if (!arraySizes->isSized() && type.isSizedArray())
5205
arraySizes->changeOuterSize(type.getOuterArraySize());
5206
5207
// And it is okay for the redeclaration to give the array a size.
5208
if (!type.isSizedArray() && arraySizes->isSized())
5209
type.changeOuterArraySize(arraySizes->getOuterSize());
5210
5211
// Now, they must match in all dimensions.
5212
if (type.isSizedArray() && *type.getArraySizes() != *arraySizes)
5213
error(loc, "cannot change array size of redeclared block", blockName.c_str(), "");
5214
}
5215
5216
symbolTable.insert(*block);
5217
5218
// Check for general layout qualifier errors
5219
layoutObjectCheck(loc, *block);
5220
5221
// Tracking for implicit sizing of array
5222
if (isIoResizeArray(block->getType())) {
5223
ioArraySymbolResizeList.push_back(block);
5224
checkIoArraysConsistency(loc, true);
5225
} else if (block->getType().isArray())
5226
fixIoArraySize(loc, block->getWritableType());
5227
5228
// Save it in the AST for linker use.
5229
trackLinkage(*block);
5230
}
5231
5232
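// Illustrative sketch (not part of the upstream comments): for a hypothetical GLSL
// declaration such as
//
//     void f(const float a, in float b, out float c, inout float d, float e);
//
// the switch below maps the const parameter 'a' to EvqConstReadOnly, keeps 'b', 'c',
// and 'd' as in/out/inout, and defaults the unqualified 'e' to EvqIn.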
void TParseContext::paramCheckFixStorage(const TSourceLoc& loc, const TStorageQualifier& qualifier, TType& type)
5233
{
5234
switch (qualifier) {
5235
case EvqConst:
5236
case EvqConstReadOnly:
5237
type.getQualifier().storage = EvqConstReadOnly;
5238
break;
5239
case EvqIn:
5240
case EvqOut:
5241
case EvqInOut:
5242
case EvqTileImageEXT:
5243
type.getQualifier().storage = qualifier;
5244
break;
5245
case EvqGlobal:
5246
case EvqTemporary:
5247
type.getQualifier().storage = EvqIn;
5248
break;
5249
default:
5250
type.getQualifier().storage = EvqIn;
5251
error(loc, "storage qualifier not allowed on function parameter", GetStorageQualifierString(qualifier), "");
5252
break;
5253
}
5254
}
5255
5256
void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& qualifier, TType& type)
5257
{
5258
if (qualifier.isMemory()) {
5259
type.getQualifier().volatil = qualifier.volatil;
5260
type.getQualifier().coherent = qualifier.coherent;
5261
type.getQualifier().devicecoherent = qualifier.devicecoherent ;
5262
type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent;
5263
type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent;
5264
type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent;
5265
type.getQualifier().shadercallcoherent = qualifier.shadercallcoherent;
5266
type.getQualifier().nonprivate = qualifier.nonprivate;
5267
type.getQualifier().readonly = qualifier.readonly;
5268
type.getQualifier().writeonly = qualifier.writeonly;
5269
type.getQualifier().restrict = qualifier.restrict;
5270
}
5271
5272
if (qualifier.isAuxiliary() ||
5273
qualifier.isInterpolation())
5274
error(loc, "cannot use auxiliary or interpolation qualifiers on a function parameter", "", "");
5275
if (qualifier.hasLayout())
5276
error(loc, "cannot use layout qualifiers on a function parameter", "", "");
5277
if (qualifier.invariant)
5278
error(loc, "cannot use invariant qualifier on a function parameter", "", "");
5279
if (qualifier.isNoContraction()) {
5280
if (qualifier.isParamOutput())
5281
type.getQualifier().setNoContraction();
5282
else
5283
warn(loc, "qualifier has no effect on non-output parameters", "precise", "");
5284
}
5285
if (qualifier.isNonUniform())
5286
type.getQualifier().nonUniform = qualifier.nonUniform;
5287
if (qualifier.isSpirvByReference())
5288
type.getQualifier().setSpirvByReference();
5289
if (qualifier.isSpirvLiteral()) {
5290
if (type.getBasicType() == EbtFloat || type.getBasicType() == EbtInt || type.getBasicType() == EbtUint ||
5291
type.getBasicType() == EbtBool)
5292
type.getQualifier().setSpirvLiteral();
5293
else
5294
error(loc, "cannot use spirv_literal qualifier", type.getBasicTypeString().c_str(), "");
5295
}
5296
5297
paramCheckFixStorage(loc, qualifier.storage, type);
5298
}
5299
5300
void TParseContext::nestedBlockCheck(const TSourceLoc& loc)
5301
{
5302
if (structNestingLevel > 0 || blockNestingLevel > 0)
5303
error(loc, "cannot nest a block definition inside a structure or block", "", "");
5304
++blockNestingLevel;
5305
}
5306
5307
void TParseContext::nestedStructCheck(const TSourceLoc& loc)
5308
{
5309
if (structNestingLevel > 0 || blockNestingLevel > 0)
5310
error(loc, "cannot nest a structure definition inside a structure or block", "", "");
5311
++structNestingLevel;
5312
}
5313
5314
void TParseContext::arrayObjectCheck(const TSourceLoc& loc, const TType& type, const char* op)
5315
{
5316
// Some versions don't allow comparing arrays or structures containing arrays
5317
if (type.containsArray()) {
5318
profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, op);
5319
profileRequires(loc, EEsProfile, 300, nullptr, op);
5320
}
5321
}
5322
5323
void TParseContext::opaqueCheck(const TSourceLoc& loc, const TType& type, const char* op)
5324
{
5325
if (containsFieldWithBasicType(type, EbtSampler) && !extensionTurnedOn(E_GL_ARB_bindless_texture))
5326
error(loc, "can't use with samplers or structs containing samplers", op, "");
5327
}
5328
5329
void TParseContext::referenceCheck(const TSourceLoc& loc, const TType& type, const char* op)
5330
{
5331
if (containsFieldWithBasicType(type, EbtReference))
5332
error(loc, "can't use with reference types", op, "");
5333
}
5334
5335
void TParseContext::storage16BitAssignmentCheck(const TSourceLoc& loc, const TType& type, const char* op)
5336
{
5337
if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtFloat16))
5338
requireFloat16Arithmetic(loc, op, "can't use with structs containing float16");
5339
5340
if (type.isArray() && type.getBasicType() == EbtFloat16)
5341
requireFloat16Arithmetic(loc, op, "can't use with arrays containing float16");
5342
5343
if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt16))
5344
requireInt16Arithmetic(loc, op, "can't use with structs containing int16");
5345
5346
if (type.isArray() && type.getBasicType() == EbtInt16)
5347
requireInt16Arithmetic(loc, op, "can't use with arrays containing int16");
5348
5349
if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint16))
5350
requireInt16Arithmetic(loc, op, "can't use with structs containing uint16");
5351
5352
if (type.isArray() && type.getBasicType() == EbtUint16)
5353
requireInt16Arithmetic(loc, op, "can't use with arrays containing uint16");
5354
5355
if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt8))
5356
requireInt8Arithmetic(loc, op, "can't use with structs containing int8");
5357
5358
if (type.isArray() && type.getBasicType() == EbtInt8)
5359
requireInt8Arithmetic(loc, op, "can't use with arrays containing int8");
5360
5361
if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint8))
5362
requireInt8Arithmetic(loc, op, "can't use with structs containing uint8");
5363
5364
if (type.isArray() && type.getBasicType() == EbtUint8)
5365
requireInt8Arithmetic(loc, op, "can't use with arrays containing uint8");
5366
}
5367
5368
void TParseContext::specializationCheck(const TSourceLoc& loc, const TType& type, const char* op)
5369
{
5370
if (type.containsSpecializationSize())
5371
error(loc, "can't use with types containing arrays sized with a specialization constant", op, "");
5372
}
5373
5374
void TParseContext::structTypeCheck(const TSourceLoc& /*loc*/, TPublicType& publicType)
5375
{
5376
const TTypeList& typeList = *publicType.userDef->getStruct();
5377
5378
// fix and check for member storage qualifiers and types that don't belong within a structure
5379
for (unsigned int member = 0; member < typeList.size(); ++member) {
5380
TQualifier& memberQualifier = typeList[member].type->getQualifier();
5381
const TSourceLoc& memberLoc = typeList[member].loc;
5382
if (memberQualifier.isAuxiliary() ||
5383
memberQualifier.isInterpolation() ||
5384
(memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal))
5385
error(memberLoc, "cannot use storage or interpolation qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
5386
if (memberQualifier.isMemory())
5387
error(memberLoc, "cannot use memory qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
5388
if (memberQualifier.hasLayout()) {
5389
error(memberLoc, "cannot use layout qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
5390
memberQualifier.clearLayout();
5391
}
5392
if (memberQualifier.invariant)
5393
error(memberLoc, "cannot use invariant qualifier on structure members", typeList[member].type->getFieldName().c_str(), "");
5394
}
5395
}
5396
5397
//
5398
// See if this loop satisfies the limitations for ES 2.0 (version 100) for loops in Appendix A:
5399
//
5400
// "The loop index has type int or float.
5401
//
5402
// "The for statement has the form:
5403
// for ( init-declaration ; condition ; expression )
5404
// init-declaration has the form: type-specifier identifier = constant-expression
5405
// condition has the form: loop-index relational_operator constant-expression
5406
// where relational_operator is one of: > >= < <= == or !=
5407
// expression [sic] has one of the following forms:
5408
// loop-index++
5409
// loop-index--
5410
// loop-index += constant-expression
5411
// loop-index -= constant-expression
5412
//
5413
// The body is handled in an AST traversal.
5414
//
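// For illustration only (hypothetical shader code, not tied to any test case):
//
//     for (int i = 0; i < 16; i++) { ... }    // satisfies the limitations above
//     for (int i = 0; i < n; i++) { ... }     // does not: 'n' is not a constant-expression
//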
5415
void TParseContext::inductiveLoopCheck(const TSourceLoc& loc, TIntermNode* init, TIntermLoop* loop)
5416
{
5417
// loop index init must exist and be a declaration, which shows up in the AST as an aggregate of size 1 of the declaration
5418
bool badInit = false;
5419
if (! init || ! init->getAsAggregate() || init->getAsAggregate()->getSequence().size() != 1)
5420
badInit = true;
5421
TIntermBinary* binaryInit = nullptr;
5422
if (! badInit) {
5423
// get the declaration assignment
5424
binaryInit = init->getAsAggregate()->getSequence()[0]->getAsBinaryNode();
5425
if (! binaryInit)
5426
badInit = true;
5427
}
5428
if (badInit) {
5429
error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", "");
5430
return;
5431
}
5432
5433
// loop index must be type int or float
5434
if (! binaryInit->getType().isScalar() || (binaryInit->getBasicType() != EbtInt && binaryInit->getBasicType() != EbtFloat)) {
5435
error(loc, "inductive loop requires a scalar 'int' or 'float' loop index", "limitations", "");
5436
return;
5437
}
5438
5439
// init must have the form "loop-index = constant"
5440
if (binaryInit->getOp() != EOpAssign || ! binaryInit->getLeft()->getAsSymbolNode() || ! binaryInit->getRight()->getAsConstantUnion()) {
5441
error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", "");
5442
return;
5443
}
5444
5445
// get the unique id of the loop index
5446
long long loopIndex = binaryInit->getLeft()->getAsSymbolNode()->getId();
5447
inductiveLoopIds.insert(loopIndex);
5448
5449
// condition's form must be "loop-index relational-operator constant-expression"
5450
bool badCond = ! loop->getTest();
5451
if (! badCond) {
5452
TIntermBinary* binaryCond = loop->getTest()->getAsBinaryNode();
5453
badCond = ! binaryCond;
5454
if (! badCond) {
5455
switch (binaryCond->getOp()) {
5456
case EOpGreaterThan:
5457
case EOpGreaterThanEqual:
5458
case EOpLessThan:
5459
case EOpLessThanEqual:
5460
case EOpEqual:
5461
case EOpNotEqual:
5462
break;
5463
default:
5464
badCond = true;
5465
}
5466
}
5467
if (binaryCond && (! binaryCond->getLeft()->getAsSymbolNode() ||
5468
binaryCond->getLeft()->getAsSymbolNode()->getId() != loopIndex ||
5469
! binaryCond->getRight()->getAsConstantUnion()))
5470
badCond = true;
5471
}
5472
if (badCond) {
5473
error(loc, "inductive-loop condition requires the form \"loop-index <comparison-op> constant-expression\"", "limitations", "");
5474
return;
5475
}
5476
5477
// loop-index++
5478
// loop-index--
5479
// loop-index += constant-expression
5480
// loop-index -= constant-expression
5481
bool badTerminal = ! loop->getTerminal();
5482
if (! badTerminal) {
5483
TIntermUnary* unaryTerminal = loop->getTerminal()->getAsUnaryNode();
5484
TIntermBinary* binaryTerminal = loop->getTerminal()->getAsBinaryNode();
5485
if (unaryTerminal || binaryTerminal) {
5486
switch(loop->getTerminal()->getAsOperator()->getOp()) {
5487
case EOpPostDecrement:
5488
case EOpPostIncrement:
5489
case EOpAddAssign:
5490
case EOpSubAssign:
5491
break;
5492
default:
5493
badTerminal = true;
5494
}
5495
} else
5496
badTerminal = true;
5497
if (binaryTerminal && (! binaryTerminal->getLeft()->getAsSymbolNode() ||
5498
binaryTerminal->getLeft()->getAsSymbolNode()->getId() != loopIndex ||
5499
! binaryTerminal->getRight()->getAsConstantUnion()))
5500
badTerminal = true;
5501
if (unaryTerminal && (! unaryTerminal->getOperand()->getAsSymbolNode() ||
5502
unaryTerminal->getOperand()->getAsSymbolNode()->getId() != loopIndex))
5503
badTerminal = true;
5504
}
5505
if (badTerminal) {
5506
error(loc, "inductive-loop termination requires the form \"loop-index++, loop-index--, loop-index += constant-expression, or loop-index -= constant-expression\"", "limitations", "");
5507
return;
5508
}
5509
5510
// the body
5511
inductiveLoopBodyCheck(loop->getBody(), loopIndex, symbolTable);
5512
}
5513
5514
// Do limit checks for built-in arrays.
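// For illustration only: a redeclaration such as "out float gl_ClipDistance[4];"
// is checked here so that 4 <= gl_MaxClipDistances.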
5515
void TParseContext::arrayLimitCheck(const TSourceLoc& loc, const TString& identifier, int size)
5516
{
5517
if (identifier.compare("gl_TexCoord") == 0)
5518
limitCheck(loc, size, "gl_MaxTextureCoords", "gl_TexCoord array size");
5519
else if (identifier.compare("gl_ClipDistance") == 0)
5520
limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistance array size");
5521
else if (identifier.compare("gl_CullDistance") == 0)
5522
limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistance array size");
5523
else if (identifier.compare("gl_ClipDistancePerViewNV") == 0)
5524
limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistancePerViewNV array size");
5525
else if (identifier.compare("gl_CullDistancePerViewNV") == 0)
5526
limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistancePerViewNV array size");
5527
}
5528
5529
// See if the provided value is less than or equal to the symbol indicated by limit,
5530
// which should be a constant in the symbol table.
5531
void TParseContext::limitCheck(const TSourceLoc& loc, int value, const char* limit, const char* feature)
5532
{
5533
TSymbol* symbol = symbolTable.find(limit);
5534
assert(symbol->getAsVariable());
5535
const TConstUnionArray& constArray = symbol->getAsVariable()->getConstArray();
5536
assert(! constArray.empty());
5537
if (value > constArray[0].getIConst())
5538
error(loc, "must be less than or equal to", feature, "%s (%d)", limit, constArray[0].getIConst());
5539
}
5540
5541
//
5542
// Do any additional error checking, etc., once we know the parsing is done.
5543
//
5544
void TParseContext::finish()
5545
{
5546
TParseContextBase::finish();
5547
5548
if (parsingBuiltins)
5549
return;
5550
5551
// Check on array indexes for ES 2.0 (version 100) limitations.
5552
for (size_t i = 0; i < needsIndexLimitationChecking.size(); ++i)
5553
constantIndexExpressionCheck(needsIndexLimitationChecking[i]);
5554
5555
// Check for stages that are enabled by extension.
5556
// Can't do this at the beginning; it is a chicken-and-egg problem to add a stage by
5557
// extension.
5558
// Stage-specific features were already tested for correctly; this is just
5559
// about the stage itself.
5560
switch (language) {
5561
case EShLangGeometry:
5562
if (isEsProfile() && version == 310)
5563
requireExtensions(getCurrentLoc(), Num_AEP_geometry_shader, AEP_geometry_shader, "geometry shaders");
5564
break;
5565
case EShLangTessControl:
5566
case EShLangTessEvaluation:
5567
if (isEsProfile() && version == 310)
5568
requireExtensions(getCurrentLoc(), Num_AEP_tessellation_shader, AEP_tessellation_shader, "tessellation shaders");
5569
else if (!isEsProfile() && version < 400)
5570
requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_tessellation_shader, "tessellation shaders");
5571
break;
5572
case EShLangCompute:
5573
if (!isEsProfile() && version < 430)
5574
requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_compute_shader, "compute shaders");
5575
break;
5576
case EShLangTask:
5577
requireExtensions(getCurrentLoc(), Num_AEP_mesh_shader, AEP_mesh_shader, "task shaders");
5578
break;
5579
case EShLangMesh:
5580
requireExtensions(getCurrentLoc(), Num_AEP_mesh_shader, AEP_mesh_shader, "mesh shaders");
5581
break;
5582
default:
5583
break;
5584
}
5585
5586
// Set default outputs for GL_NV_geometry_shader_passthrough
5587
if (language == EShLangGeometry && extensionTurnedOn(E_SPV_NV_geometry_shader_passthrough)) {
5588
if (intermediate.getOutputPrimitive() == ElgNone) {
5589
switch (intermediate.getInputPrimitive()) {
5590
case ElgPoints: intermediate.setOutputPrimitive(ElgPoints); break;
5591
case ElgLines: intermediate.setOutputPrimitive(ElgLineStrip); break;
5592
case ElgTriangles: intermediate.setOutputPrimitive(ElgTriangleStrip); break;
5593
default: break;
5594
}
5595
}
5596
if (intermediate.getVertices() == TQualifier::layoutNotSet) {
5597
switch (intermediate.getInputPrimitive()) {
5598
case ElgPoints: intermediate.setVertices(1); break;
5599
case ElgLines: intermediate.setVertices(2); break;
5600
case ElgTriangles: intermediate.setVertices(3); break;
5601
default: break;
5602
}
5603
}
5604
}
5605
}
5606
5607
//
5608
// Layout qualifier stuff.
5609
//
5610
5611
// Put the id's layout qualification into the public type, for qualifiers not having a number set.
5612
// This is before we know any type information for error checking.
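// For illustration only, a few GLSL qualifiers that take no value and are handled
// below (the declarations themselves are hypothetical):
//
//     layout(row_major, std140) uniform;                   // matrix/packing defaults
//     layout(early_fragment_tests) in;                     // fragment stage
//     layout(triangles, fractional_odd_spacing, cw) in;    // tessellation evaluation stage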
5613
void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id)
5614
{
5615
std::transform(id.begin(), id.end(), id.begin(), ::tolower);
5616
5617
if (id == TQualifier::getLayoutMatrixString(ElmColumnMajor)) {
5618
publicType.qualifier.layoutMatrix = ElmColumnMajor;
5619
return;
5620
}
5621
if (id == TQualifier::getLayoutMatrixString(ElmRowMajor)) {
5622
publicType.qualifier.layoutMatrix = ElmRowMajor;
5623
return;
5624
}
5625
if (id == TQualifier::getLayoutPackingString(ElpPacked)) {
5626
if (spvVersion.spv != 0) {
5627
if (spvVersion.vulkanRelaxed)
5628
return; // silently ignore qualifier
5629
else
5630
spvRemoved(loc, "packed");
5631
}
5632
publicType.qualifier.layoutPacking = ElpPacked;
5633
return;
5634
}
5635
if (id == TQualifier::getLayoutPackingString(ElpShared)) {
5636
if (spvVersion.spv != 0) {
5637
if (spvVersion.vulkanRelaxed)
5638
return; // silently ignore qualifier
5639
else
5640
spvRemoved(loc, "shared");
5641
}
5642
publicType.qualifier.layoutPacking = ElpShared;
5643
return;
5644
}
5645
if (id == TQualifier::getLayoutPackingString(ElpStd140)) {
5646
publicType.qualifier.layoutPacking = ElpStd140;
5647
return;
5648
}
5649
if (id == TQualifier::getLayoutPackingString(ElpStd430)) {
5650
requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "std430");
5651
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "std430");
5652
profileRequires(loc, EEsProfile, 310, nullptr, "std430");
5653
publicType.qualifier.layoutPacking = ElpStd430;
5654
return;
5655
}
5656
if (id == TQualifier::getLayoutPackingString(ElpScalar)) {
5657
requireVulkan(loc, "scalar");
5658
requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "scalar block layout");
5659
publicType.qualifier.layoutPacking = ElpScalar;
5660
return;
5661
}
5662
// TODO: compile-time performance: may need to stop doing linear searches
5663
for (TLayoutFormat format = (TLayoutFormat)(ElfNone + 1); format < ElfCount; format = (TLayoutFormat)(format + 1)) {
5664
if (id == TQualifier::getLayoutFormatString(format)) {
5665
if ((format > ElfEsFloatGuard && format < ElfFloatGuard) ||
5666
(format > ElfEsIntGuard && format < ElfIntGuard) ||
5667
(format > ElfEsUintGuard && format < ElfCount))
5668
requireProfile(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, "image load-store format");
5669
profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "image load store");
5670
profileRequires(loc, EEsProfile, 310, E_GL_ARB_shader_image_load_store, "image load store");
5671
publicType.qualifier.layoutFormat = format;
5672
return;
5673
}
5674
}
5675
if (id == "push_constant") {
5676
requireVulkan(loc, "push_constant");
5677
publicType.qualifier.layoutPushConstant = true;
5678
return;
5679
}
5680
if (id == "buffer_reference") {
5681
requireVulkan(loc, "buffer_reference");
5682
requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference");
5683
publicType.qualifier.layoutBufferReference = true;
5684
intermediate.setUseStorageBuffer();
5685
intermediate.setUsePhysicalStorageBuffer();
5686
return;
5687
}
5688
if (id == "bindless_sampler") {
5689
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "bindless_sampler");
5690
publicType.qualifier.layoutBindlessSampler = true;
5691
intermediate.setBindlessTextureMode(currentCaller, AstRefTypeLayout);
5692
return;
5693
}
5694
if (id == "bindless_image") {
5695
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "bindless_image");
5696
publicType.qualifier.layoutBindlessImage = true;
5697
intermediate.setBindlessImageMode(currentCaller, AstRefTypeLayout);
5698
return;
5699
}
5700
if (id == "bound_sampler") {
5701
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "bound_sampler");
5702
publicType.qualifier.layoutBindlessSampler = false;
5703
return;
5704
}
5705
if (id == "bound_image") {
5706
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "bound_image");
5707
publicType.qualifier.layoutBindlessImage = false;
5708
return;
5709
}
5710
if (language == EShLangGeometry || language == EShLangTessEvaluation || language == EShLangMesh) {
5711
if (id == TQualifier::getGeometryString(ElgTriangles)) {
5712
publicType.shaderQualifiers.geometry = ElgTriangles;
5713
return;
5714
}
5715
if (language == EShLangGeometry || language == EShLangMesh) {
5716
if (id == TQualifier::getGeometryString(ElgPoints)) {
5717
publicType.shaderQualifiers.geometry = ElgPoints;
5718
return;
5719
}
5720
if (id == TQualifier::getGeometryString(ElgLines)) {
5721
publicType.shaderQualifiers.geometry = ElgLines;
5722
return;
5723
}
5724
if (language == EShLangGeometry) {
5725
if (id == TQualifier::getGeometryString(ElgLineStrip)) {
5726
publicType.shaderQualifiers.geometry = ElgLineStrip;
5727
return;
5728
}
5729
if (id == TQualifier::getGeometryString(ElgLinesAdjacency)) {
5730
publicType.shaderQualifiers.geometry = ElgLinesAdjacency;
5731
return;
5732
}
5733
if (id == TQualifier::getGeometryString(ElgTrianglesAdjacency)) {
5734
publicType.shaderQualifiers.geometry = ElgTrianglesAdjacency;
5735
return;
5736
}
5737
if (id == TQualifier::getGeometryString(ElgTriangleStrip)) {
5738
publicType.shaderQualifiers.geometry = ElgTriangleStrip;
5739
return;
5740
}
5741
if (id == "passthrough") {
5742
requireExtensions(loc, 1, &E_SPV_NV_geometry_shader_passthrough, "geometry shader passthrough");
5743
publicType.qualifier.layoutPassthrough = true;
5744
intermediate.setGeoPassthroughEXT();
5745
return;
5746
}
5747
}
5748
} else {
5749
assert(language == EShLangTessEvaluation);
5750
5751
// input primitive
5752
if (id == TQualifier::getGeometryString(ElgTriangles)) {
5753
publicType.shaderQualifiers.geometry = ElgTriangles;
5754
return;
5755
}
5756
if (id == TQualifier::getGeometryString(ElgQuads)) {
5757
publicType.shaderQualifiers.geometry = ElgQuads;
5758
return;
5759
}
5760
if (id == TQualifier::getGeometryString(ElgIsolines)) {
5761
publicType.shaderQualifiers.geometry = ElgIsolines;
5762
return;
5763
}
5764
5765
// vertex spacing
5766
if (id == TQualifier::getVertexSpacingString(EvsEqual)) {
5767
publicType.shaderQualifiers.spacing = EvsEqual;
5768
return;
5769
}
5770
if (id == TQualifier::getVertexSpacingString(EvsFractionalEven)) {
5771
publicType.shaderQualifiers.spacing = EvsFractionalEven;
5772
return;
5773
}
5774
if (id == TQualifier::getVertexSpacingString(EvsFractionalOdd)) {
5775
publicType.shaderQualifiers.spacing = EvsFractionalOdd;
5776
return;
5777
}
5778
5779
// triangle order
5780
if (id == TQualifier::getVertexOrderString(EvoCw)) {
5781
publicType.shaderQualifiers.order = EvoCw;
5782
return;
5783
}
5784
if (id == TQualifier::getVertexOrderString(EvoCcw)) {
5785
publicType.shaderQualifiers.order = EvoCcw;
5786
return;
5787
}
5788
5789
// point mode
5790
if (id == "point_mode") {
5791
publicType.shaderQualifiers.pointMode = true;
5792
return;
5793
}
5794
}
5795
}
5796
if (language == EShLangFragment) {
5797
if (id == "origin_upper_left") {
5798
requireProfile(loc, ECoreProfile | ECompatibilityProfile | ENoProfile, "origin_upper_left");
5799
if (profile == ENoProfile) {
5800
profileRequires(loc,ECoreProfile | ECompatibilityProfile, 140, E_GL_ARB_fragment_coord_conventions, "origin_upper_left");
5801
}
5802
5803
publicType.shaderQualifiers.originUpperLeft = true;
5804
return;
5805
}
5806
if (id == "pixel_center_integer") {
5807
requireProfile(loc, ECoreProfile | ECompatibilityProfile | ENoProfile, "pixel_center_integer");
5808
if (profile == ENoProfile) {
5809
profileRequires(loc,ECoreProfile | ECompatibilityProfile, 140, E_GL_ARB_fragment_coord_conventions, "pixel_center_integer");
5810
}
5811
publicType.shaderQualifiers.pixelCenterInteger = true;
5812
return;
5813
}
5814
if (id == "early_fragment_tests") {
5815
profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "early_fragment_tests");
5816
profileRequires(loc, EEsProfile, 310, nullptr, "early_fragment_tests");
5817
publicType.shaderQualifiers.earlyFragmentTests = true;
5818
return;
5819
}
5820
if (id == "early_and_late_fragment_tests_amd") {
5821
profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_AMD_shader_early_and_late_fragment_tests, "early_and_late_fragment_tests_amd");
5822
profileRequires(loc, EEsProfile, 310, nullptr, "early_and_late_fragment_tests_amd");
5823
publicType.shaderQualifiers.earlyAndLateFragmentTestsAMD = true;
5824
return;
5825
}
5826
if (id == "post_depth_coverage") {
5827
requireExtensions(loc, Num_post_depth_coverageEXTs, post_depth_coverageEXTs, "post depth coverage");
5828
if (extensionTurnedOn(E_GL_ARB_post_depth_coverage)) {
5829
publicType.shaderQualifiers.earlyFragmentTests = true;
5830
}
5831
publicType.shaderQualifiers.postDepthCoverage = true;
5832
return;
5833
}
5834
/* id was transformed into lower case at the beginning of this function. */
5835
if (id == "non_coherent_color_attachment_readext") {
5836
requireExtensions(loc, 1, &E_GL_EXT_shader_tile_image, "non_coherent_color_attachment_readEXT");
5837
publicType.shaderQualifiers.nonCoherentColorAttachmentReadEXT = true;
5838
return;
5839
}
5840
if (id == "non_coherent_depth_attachment_readext") {
5841
requireExtensions(loc, 1, &E_GL_EXT_shader_tile_image, "non_coherent_depth_attachment_readEXT");
5842
publicType.shaderQualifiers.nonCoherentDepthAttachmentReadEXT = true;
5843
return;
5844
}
5845
if (id == "non_coherent_stencil_attachment_readext") {
5846
requireExtensions(loc, 1, &E_GL_EXT_shader_tile_image, "non_coherent_stencil_attachment_readEXT");
5847
publicType.shaderQualifiers.nonCoherentStencilAttachmentReadEXT = true;
5848
return;
5849
}
5850
for (TLayoutDepth depth = (TLayoutDepth)(EldNone + 1); depth < EldCount; depth = (TLayoutDepth)(depth+1)) {
5851
if (id == TQualifier::getLayoutDepthString(depth)) {
5852
requireProfile(loc, ECoreProfile | ECompatibilityProfile, "depth layout qualifier");
5853
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, nullptr, "depth layout qualifier");
5854
publicType.shaderQualifiers.layoutDepth = depth;
5855
return;
5856
}
5857
}
5858
for (TLayoutStencil stencil = (TLayoutStencil)(ElsNone + 1); stencil < ElsCount; stencil = (TLayoutStencil)(stencil+1)) {
5859
if (id == TQualifier::getLayoutStencilString(stencil)) {
5860
requireProfile(loc, ECoreProfile | ECompatibilityProfile, "stencil layout qualifier");
5861
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, nullptr, "stencil layout qualifier");
5862
publicType.shaderQualifiers.layoutStencil = stencil;
5863
return;
5864
}
5865
}
5866
for (TInterlockOrdering order = (TInterlockOrdering)(EioNone + 1); order < EioCount; order = (TInterlockOrdering)(order+1)) {
5867
if (id == TQualifier::getInterlockOrderingString(order)) {
5868
requireProfile(loc, ECoreProfile | ECompatibilityProfile, "fragment shader interlock layout qualifier");
5869
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 450, nullptr, "fragment shader interlock layout qualifier");
5870
requireExtensions(loc, 1, &E_GL_ARB_fragment_shader_interlock, TQualifier::getInterlockOrderingString(order));
5871
if (order == EioShadingRateInterlockOrdered || order == EioShadingRateInterlockUnordered)
5872
requireExtensions(loc, 1, &E_GL_NV_shading_rate_image, TQualifier::getInterlockOrderingString(order));
5873
publicType.shaderQualifiers.interlockOrdering = order;
5874
return;
5875
}
5876
}
5877
if (id.compare(0, 13, "blend_support") == 0) {
5878
bool found = false;
5879
for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) {
5880
if (id == TQualifier::getBlendEquationString(be)) {
5881
profileRequires(loc, EEsProfile, 320, E_GL_KHR_blend_equation_advanced, "blend equation");
5882
profileRequires(loc, ~EEsProfile, 0, E_GL_KHR_blend_equation_advanced, "blend equation");
5883
intermediate.addBlendEquation(be);
5884
publicType.shaderQualifiers.blendEquation = true;
5885
found = true;
5886
break;
5887
}
5888
}
5889
if (! found)
5890
error(loc, "unknown blend equation", "blend_support", "");
5891
return;
5892
}
5893
if (id == "override_coverage") {
5894
requireExtensions(loc, 1, &E_GL_NV_sample_mask_override_coverage, "sample mask override coverage");
5895
publicType.shaderQualifiers.layoutOverrideCoverage = true;
5896
return;
5897
}
5898
if (id == "full_quads")
5899
{
5900
const char* feature = "full_quads qualifier";
5901
requireProfile(loc, ECompatibilityProfile | ECoreProfile | EEsProfile, feature);
5902
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 140, E_GL_EXT_shader_quad_control, feature);
5903
profileRequires(loc, EEsProfile, 310, E_GL_EXT_shader_quad_control, feature);
5904
publicType.qualifier.layoutFullQuads = true;
5905
return;
5906
}
5907
}
5908
if (language == EShLangVertex ||
5909
language == EShLangTessControl ||
5910
language == EShLangTessEvaluation ||
5911
language == EShLangGeometry ) {
5912
if (id == "viewport_relative") {
5913
requireExtensions(loc, 1, &E_GL_NV_viewport_array2, "view port array2");
5914
publicType.qualifier.layoutViewportRelative = true;
5915
return;
5916
}
5917
} else {
5918
if (language == EShLangRayGen || language == EShLangIntersect ||
5919
language == EShLangAnyHit || language == EShLangClosestHit ||
5920
language == EShLangMiss || language == EShLangCallable) {
5921
if (id == "shaderrecordnv" || id == "shaderrecordext") {
5922
if (id == "shaderrecordnv") {
5923
requireExtensions(loc, 1, &E_GL_NV_ray_tracing, "shader record NV");
5924
} else {
5925
requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "shader record EXT");
5926
}
5927
publicType.qualifier.layoutShaderRecord = true;
5928
return;
5929
} else if (id == "hitobjectshaderrecordnv") {
5930
requireExtensions(loc, 1, &E_GL_NV_shader_invocation_reorder, "hitobject shader record NV");
5931
publicType.qualifier.layoutHitObjectShaderRecordNV = true;
5932
return;
5933
}
5934
5935
}
5936
}
5937
if (language == EShLangCompute) {
5938
if (id.compare(0, 17, "derivative_group_") == 0) {
5939
requireExtensions(loc, 1, &E_GL_NV_compute_shader_derivatives, "compute shader derivatives");
5940
if (id == "derivative_group_quadsnv") {
5941
publicType.shaderQualifiers.layoutDerivativeGroupQuads = true;
5942
return;
5943
} else if (id == "derivative_group_linearnv") {
5944
publicType.shaderQualifiers.layoutDerivativeGroupLinear = true;
5945
return;
5946
}
5947
}
5948
}
5949
5950
if (id == "primitive_culling") {
5951
requireExtensions(loc, 1, &E_GL_EXT_ray_flags_primitive_culling, "primitive culling");
5952
publicType.shaderQualifiers.layoutPrimitiveCulling = true;
5953
return;
5954
}
5955
5956
if (id == "quad_derivatives")
5957
{
5958
const char* feature = "quad_derivatives qualifier";
5959
requireProfile(loc, ECompatibilityProfile | ECoreProfile | EEsProfile, feature);
5960
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 140, E_GL_EXT_shader_quad_control, feature);
5961
profileRequires(loc, EEsProfile, 310, E_GL_EXT_shader_quad_control, feature);
5962
publicType.qualifier.layoutQuadDeriv = true;
5963
return;
5964
}
5965
5966
error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), "");
5967
}
5968
5969
// Put the id's layout qualifier value into the public type, for qualifiers having a number set.
5970
// This is before we know any type information for error checking.
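// For illustration only, a few GLSL qualifiers that do take a value and are handled
// below (the declarations themselves are hypothetical):
//
//     layout(location = 0) in vec4 position;
//     layout(set = 0, binding = 2) uniform UBO { mat4 mvp; };
//     layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;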
5971
void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id, const TIntermTyped* node)
5972
{
5973
const char* feature = "layout-id value";
5974
const char* nonLiteralFeature = "non-literal layout-id value";
5975
5976
integerCheck(node, feature);
5977
const TIntermConstantUnion* constUnion = node->getAsConstantUnion();
5978
int value;
5979
bool nonLiteral = false;
5980
if (constUnion) {
5981
value = constUnion->getConstArray()[0].getIConst();
5982
if (! constUnion->isLiteral()) {
5983
requireProfile(loc, ECoreProfile | ECompatibilityProfile, nonLiteralFeature);
5984
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, nonLiteralFeature);
5985
}
5986
} else {
5987
// the grammar should already have given out the error message
5988
value = 0;
5989
nonLiteral = true;
5990
}
5991
5992
if (value < 0) {
5993
error(loc, "cannot be negative", feature, "");
5994
return;
5995
}
5996
5997
std::transform(id.begin(), id.end(), id.begin(), ::tolower);
5998
5999
if (id == "offset") {
6000
// "offset" can be for either
6001
// - uniform offsets
6002
// - atomic_uint offsets
6003
const char* feature = "offset";
6004
if (spvVersion.spv == 0) {
6005
requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
6006
const char* exts[2] = { E_GL_ARB_enhanced_layouts, E_GL_ARB_shader_atomic_counters };
6007
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, 2, exts, feature);
6008
profileRequires(loc, EEsProfile, 310, nullptr, feature);
6009
}
6010
publicType.qualifier.layoutOffset = value;
6011
publicType.qualifier.explicitOffset = true;
6012
if (nonLiteral)
6013
error(loc, "needs a literal integer", "offset", "");
6014
return;
6015
} else if (id == "align") {
6016
const char* feature = "uniform buffer-member align";
6017
if (spvVersion.spv == 0) {
6018
requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature);
6019
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
6020
}
6021
// "The specified alignment must be a power of 2, or a compile-time error results."
6022
if (! IsPow2(value))
6023
error(loc, "must be a power of 2", "align", "");
6024
else
6025
publicType.qualifier.layoutAlign = value;
6026
if (nonLiteral)
6027
error(loc, "needs a literal integer", "align", "");
6028
return;
6029
} else if (id == "location") {
6030
profileRequires(loc, EEsProfile, 300, nullptr, "location");
6031
const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
6032
// GL_ARB_explicit_uniform_location requires 330 or GL_ARB_explicit_attrib_location, so we do not need to add it here
6033
profileRequires(loc, ~EEsProfile, 330, 2, exts, "location");
6034
if ((unsigned int)value >= TQualifier::layoutLocationEnd)
6035
error(loc, "location is too large", id.c_str(), "");
6036
else
6037
publicType.qualifier.layoutLocation = value;
6038
if (nonLiteral)
6039
error(loc, "needs a literal integer", "location", "");
6040
return;
6041
} else if (id == "set") {
6042
if ((unsigned int)value >= TQualifier::layoutSetEnd)
6043
error(loc, "set is too large", id.c_str(), "");
6044
else
6045
publicType.qualifier.layoutSet = value;
6046
if (value != 0)
6047
requireVulkan(loc, "descriptor set");
6048
if (nonLiteral)
6049
error(loc, "needs a literal integer", "set", "");
6050
return;
6051
} else if (id == "binding") {
6052
profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, "binding");
6053
profileRequires(loc, EEsProfile, 310, nullptr, "binding");
6054
if ((unsigned int)value >= TQualifier::layoutBindingEnd)
6055
error(loc, "binding is too large", id.c_str(), "");
6056
else
6057
publicType.qualifier.layoutBinding = value;
6058
if (nonLiteral)
6059
error(loc, "needs a literal integer", "binding", "");
6060
return;
6061
}
6062
if (id == "constant_id") {
6063
requireSpv(loc, "constant_id");
6064
if (value >= (int)TQualifier::layoutSpecConstantIdEnd) {
6065
error(loc, "specialization-constant id is too large", id.c_str(), "");
6066
} else {
6067
publicType.qualifier.layoutSpecConstantId = value;
6068
publicType.qualifier.specConstant = true;
6069
if (! intermediate.addUsedConstantId(value))
6070
error(loc, "specialization-constant id already used", id.c_str(), "");
6071
}
6072
if (nonLiteral)
6073
error(loc, "needs a literal integer", "constant_id", "");
6074
return;
6075
}
6076
if (id == "component") {
6077
requireProfile(loc, ECoreProfile | ECompatibilityProfile, "component");
6078
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "component");
6079
if ((unsigned)value >= TQualifier::layoutComponentEnd)
6080
error(loc, "component is too large", id.c_str(), "");
6081
else
6082
publicType.qualifier.layoutComponent = value;
6083
if (nonLiteral)
6084
error(loc, "needs a literal integer", "component", "");
6085
return;
6086
}
6087
if (id.compare(0, 4, "xfb_") == 0) {
6088
// "Any shader making any static use (after preprocessing) of any of these
6089
// *xfb_* qualifiers will cause the shader to be in a transform feedback
6090
// capturing mode and hence responsible for describing the transform feedback
6091
// setup."
6092
intermediate.setXfbMode();
6093
const char* feature = "transform feedback qualifier";
6094
requireStage(loc, (EShLanguageMask)(EShLangVertexMask | EShLangGeometryMask | EShLangTessControlMask | EShLangTessEvaluationMask), feature);
6095
requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature);
6096
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
6097
if (id == "xfb_buffer") {
6098
// "It is a compile-time error to specify an *xfb_buffer* that is greater than
6099
// the implementation-dependent constant gl_MaxTransformFeedbackBuffers."
6100
if (value >= resources.maxTransformFeedbackBuffers)
6101
error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers);
6102
if (value >= (int)TQualifier::layoutXfbBufferEnd)
6103
error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd-1);
6104
else
6105
publicType.qualifier.layoutXfbBuffer = value;
6106
if (nonLiteral)
6107
error(loc, "needs a literal integer", "xfb_buffer", "");
6108
return;
6109
} else if (id == "xfb_offset") {
6110
if (value >= (int)TQualifier::layoutXfbOffsetEnd)
6111
error(loc, "offset is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbOffsetEnd-1);
6112
else
6113
publicType.qualifier.layoutXfbOffset = value;
6114
if (nonLiteral)
6115
error(loc, "needs a literal integer", "xfb_offset", "");
6116
return;
6117
} else if (id == "xfb_stride") {
6118
// "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
6119
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
6120
if (value > 4 * resources.maxTransformFeedbackInterleavedComponents) {
6121
error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d",
6122
resources.maxTransformFeedbackInterleavedComponents);
6123
}
6124
if (value >= (int)TQualifier::layoutXfbStrideEnd)
6125
error(loc, "stride is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbStrideEnd-1);
6126
else
6127
publicType.qualifier.layoutXfbStride = value;
6128
if (nonLiteral)
6129
error(loc, "needs a literal integer", "xfb_stride", "");
6130
return;
6131
}
6132
}
6133
if (id == "input_attachment_index") {
6134
requireVulkan(loc, "input_attachment_index");
6135
if (value >= (int)TQualifier::layoutAttachmentEnd)
6136
error(loc, "attachment index is too large", id.c_str(), "");
6137
else
6138
publicType.qualifier.layoutAttachment = value;
6139
if (nonLiteral)
6140
error(loc, "needs a literal integer", "input_attachment_index", "");
6141
return;
6142
}
6143
if (id == "num_views") {
6144
requireExtensions(loc, Num_OVR_multiview_EXTs, OVR_multiview_EXTs, "num_views");
6145
publicType.shaderQualifiers.numViews = value;
6146
if (nonLiteral)
6147
error(loc, "needs a literal integer", "num_views", "");
6148
return;
6149
}
6150
if (language == EShLangVertex ||
6151
language == EShLangTessControl ||
6152
language == EShLangTessEvaluation ||
6153
language == EShLangGeometry) {
6154
if (id == "secondary_view_offset") {
6155
requireExtensions(loc, 1, &E_GL_NV_stereo_view_rendering, "stereo view rendering");
6156
publicType.qualifier.layoutSecondaryViewportRelativeOffset = value;
6157
if (nonLiteral)
6158
error(loc, "needs a literal integer", "secondary_view_offset", "");
6159
return;
6160
}
6161
}
6162
6163
if (id == "buffer_reference_align") {
6164
requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference_align");
6165
if (! IsPow2(value))
6166
error(loc, "must be a power of 2", "buffer_reference_align", "");
6167
else
6168
publicType.qualifier.layoutBufferReferenceAlign = IntLog2(value);
6169
if (nonLiteral)
6170
error(loc, "needs a literal integer", "buffer_reference_align", "");
6171
return;
6172
}
6173
6174
switch (language) {
6175
case EShLangTessControl:
6176
if (id == "vertices") {
6177
if (value == 0)
6178
error(loc, "must be greater than 0", "vertices", "");
6179
else
6180
publicType.shaderQualifiers.vertices = value;
6181
if (nonLiteral)
6182
error(loc, "needs a literal integer", "vertices", "");
6183
return;
6184
}
6185
break;
6186
6187
case EShLangGeometry:
6188
if (id == "invocations") {
6189
profileRequires(loc, ECompatibilityProfile | ECoreProfile, 400, nullptr, "invocations");
6190
if (value == 0)
6191
error(loc, "must be at least 1", "invocations", "");
6192
else
6193
publicType.shaderQualifiers.invocations = value;
6194
if (nonLiteral)
6195
error(loc, "needs a literal integer", "invocations", "");
6196
return;
6197
}
6198
if (id == "max_vertices") {
6199
publicType.shaderQualifiers.vertices = value;
6200
if (value > resources.maxGeometryOutputVertices)
6201
error(loc, "too large, must be less than gl_MaxGeometryOutputVertices", "max_vertices", "");
6202
if (nonLiteral)
6203
error(loc, "needs a literal integer", "max_vertices", "");
6204
return;
6205
}
6206
if (id == "stream") {
6207
requireProfile(loc, ~EEsProfile, "selecting output stream");
6208
publicType.qualifier.layoutStream = value;
6209
if (value > 0)
6210
intermediate.setMultiStream();
6211
if (nonLiteral)
6212
error(loc, "needs a literal integer", "stream", "");
6213
return;
6214
}
6215
break;
6216
6217
case EShLangFragment:
6218
if (id == "index") {
6219
requireProfile(loc, ECompatibilityProfile | ECoreProfile | EEsProfile, "index layout qualifier on fragment output");
6220
const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
6221
profileRequires(loc, ECompatibilityProfile | ECoreProfile, 330, 2, exts, "index layout qualifier on fragment output");
6222
profileRequires(loc, EEsProfile ,310, E_GL_EXT_blend_func_extended, "index layout qualifier on fragment output");
6223
// "It is also a compile-time error if a fragment shader sets a layout index to less than 0 or greater than 1."
6224
if (value < 0 || value > 1) {
6225
value = 0;
6226
error(loc, "value must be 0 or 1", "index", "");
6227
}
6228
6229
publicType.qualifier.layoutIndex = value;
6230
if (nonLiteral)
6231
error(loc, "needs a literal integer", "index", "");
6232
return;
6233
}
6234
break;
6235
6236
case EShLangMesh:
6237
if (id == "max_vertices") {
6238
requireExtensions(loc, Num_AEP_mesh_shader, AEP_mesh_shader, "max_vertices");
6239
publicType.shaderQualifiers.vertices = value;
6240
int max = extensionTurnedOn(E_GL_EXT_mesh_shader) ? resources.maxMeshOutputVerticesEXT
6241
: resources.maxMeshOutputVerticesNV;
6242
if (value > max) {
6243
TString maxErrString = "too large, must be less than ";
6244
maxErrString.append(extensionTurnedOn(E_GL_EXT_mesh_shader) ? "gl_MaxMeshOutputVerticesEXT"
6245
: "gl_MaxMeshOutputVerticesNV");
6246
error(loc, maxsErrtring.c_str(), "max_vertices", "");
6247
}
6248
if (nonLiteral)
6249
error(loc, "needs a literal integer", "max_vertices", "");
6250
return;
6251
}
6252
if (id == "max_primitives") {
6253
requireExtensions(loc, Num_AEP_mesh_shader, AEP_mesh_shader, "max_primitives");
6254
publicType.shaderQualifiers.primitives = value;
6255
int max = extensionTurnedOn(E_GL_EXT_mesh_shader) ? resources.maxMeshOutputPrimitivesEXT
6256
: resources.maxMeshOutputPrimitivesNV;
6257
if (value > max) {
6258
TString maxErrString = "too large, must be less than ";
6259
maxErrString.append(extensionTurnedOn(E_GL_EXT_mesh_shader) ? "gl_MaxMeshOutputPrimitivesEXT"
6260
: "gl_MaxMeshOutputPrimitivesNV");
6261
error(loc, maxsErrtring.c_str(), "max_primitives", "");
6262
}
6263
if (nonLiteral)
6264
error(loc, "needs a literal integer", "max_primitives", "");
6265
return;
6266
}
6267
[[fallthrough]];
6268
6269
case EShLangTask:
6270
// Fall through
6271
case EShLangCompute:
6272
if (id.compare(0, 11, "local_size_") == 0) {
6273
if (language == EShLangMesh || language == EShLangTask) {
6274
requireExtensions(loc, Num_AEP_mesh_shader, AEP_mesh_shader, "gl_WorkGroupSize");
6275
} else {
6276
profileRequires(loc, EEsProfile, 310, nullptr, "gl_WorkGroupSize");
6277
profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_compute_shader, "gl_WorkGroupSize");
6278
}
6279
if (nonLiteral)
6280
error(loc, "needs a literal integer", "local_size", "");
6281
if (id.size() == 12 && value == 0) {
6282
error(loc, "must be at least 1", id.c_str(), "");
6283
return;
6284
}
6285
if (id == "local_size_x") {
6286
publicType.shaderQualifiers.localSize[0] = value;
6287
publicType.shaderQualifiers.localSizeNotDefault[0] = true;
6288
return;
6289
}
6290
if (id == "local_size_y") {
6291
publicType.shaderQualifiers.localSize[1] = value;
6292
publicType.shaderQualifiers.localSizeNotDefault[1] = true;
6293
return;
6294
}
6295
if (id == "local_size_z") {
6296
publicType.shaderQualifiers.localSize[2] = value;
6297
publicType.shaderQualifiers.localSizeNotDefault[2] = true;
6298
return;
6299
}
6300
if (spvVersion.spv != 0) {
6301
if (id == "local_size_x_id") {
6302
publicType.shaderQualifiers.localSizeSpecId[0] = value;
6303
return;
6304
}
6305
if (id == "local_size_y_id") {
6306
publicType.shaderQualifiers.localSizeSpecId[1] = value;
6307
return;
6308
}
6309
if (id == "local_size_z_id") {
6310
publicType.shaderQualifiers.localSizeSpecId[2] = value;
6311
return;
6312
}
6313
}
6314
}
6315
break;
6316
6317
default:
6318
break;
6319
}
6320
6321
error(loc, "there is no such layout identifier for this stage taking an assigned value", id.c_str(), "");
6322
}
6323
6324
// Merge any layout qualifier information from src into dst, leaving everything else in dst alone
6325
//
6326
// "More than one layout qualifier may appear in a single declaration.
6327
// Additionally, the same layout-qualifier-name can occur multiple times
6328
// within a layout qualifier or across multiple layout qualifiers in the
6329
// same declaration. When the same layout-qualifier-name occurs
6330
// multiple times, in a single declaration, the last occurrence overrides
6331
// the former occurrence(s). Further, if such a layout-qualifier-name
6332
// will effect subsequent declarations or other observable behavior, it
6333
// is only the last occurrence that will have any effect, behaving as if
6334
// the earlier occurrence(s) within the declaration are not present.
6335
// This is also true for overriding layout-qualifier-names, where one
6336
// overrides the other (e.g., row_major vs. column_major); only the last
6337
// occurrence has any effect."
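// For example (illustrative only), in
//
//     layout(column_major) layout(row_major) uniform Block { mat4 m; };
//
// only row_major takes effect, because it is the last occurrence of the
// overriding pair.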
6338
void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly)
6339
{
6340
if (src.hasMatrix())
6341
dst.layoutMatrix = src.layoutMatrix;
6342
if (src.hasPacking())
6343
dst.layoutPacking = src.layoutPacking;
6344
6345
if (src.hasStream())
6346
dst.layoutStream = src.layoutStream;
6347
if (src.hasFormat())
6348
dst.layoutFormat = src.layoutFormat;
6349
if (src.hasXfbBuffer())
6350
dst.layoutXfbBuffer = src.layoutXfbBuffer;
6351
if (src.hasBufferReferenceAlign())
6352
dst.layoutBufferReferenceAlign = src.layoutBufferReferenceAlign;
6353
6354
if (src.hasAlign())
6355
dst.layoutAlign = src.layoutAlign;
6356
6357
if (! inheritOnly) {
6358
if (src.hasLocation())
6359
dst.layoutLocation = src.layoutLocation;
6360
if (src.hasOffset())
6361
dst.layoutOffset = src.layoutOffset;
6362
if (src.hasSet())
6363
dst.layoutSet = src.layoutSet;
6364
if (src.layoutBinding != TQualifier::layoutBindingEnd)
6365
dst.layoutBinding = src.layoutBinding;
6366
6367
if (src.hasSpecConstantId())
6368
dst.layoutSpecConstantId = src.layoutSpecConstantId;
6369
6370
if (src.hasComponent())
6371
dst.layoutComponent = src.layoutComponent;
6372
if (src.hasIndex())
6373
dst.layoutIndex = src.layoutIndex;
6374
if (src.hasXfbStride())
6375
dst.layoutXfbStride = src.layoutXfbStride;
6376
if (src.hasXfbOffset())
6377
dst.layoutXfbOffset = src.layoutXfbOffset;
6378
if (src.hasAttachment())
6379
dst.layoutAttachment = src.layoutAttachment;
6380
if (src.layoutPushConstant)
6381
dst.layoutPushConstant = true;
6382
6383
if (src.layoutBufferReference)
6384
dst.layoutBufferReference = true;
6385
6386
if (src.layoutPassthrough)
6387
dst.layoutPassthrough = true;
6388
if (src.layoutViewportRelative)
6389
dst.layoutViewportRelative = true;
6390
if (src.layoutSecondaryViewportRelativeOffset != -2048)
6391
dst.layoutSecondaryViewportRelativeOffset = src.layoutSecondaryViewportRelativeOffset;
6392
if (src.layoutShaderRecord)
6393
dst.layoutShaderRecord = true;
6394
if (src.layoutFullQuads)
6395
dst.layoutFullQuads = true;
6396
if (src.layoutQuadDeriv)
6397
dst.layoutQuadDeriv = true;
6398
if (src.layoutBindlessSampler)
6399
dst.layoutBindlessSampler = true;
6400
if (src.layoutBindlessImage)
6401
dst.layoutBindlessImage = true;
6402
if (src.pervertexNV)
6403
dst.pervertexNV = true;
6404
if (src.pervertexEXT)
6405
dst.pervertexEXT = true;
6406
if (src.layoutHitObjectShaderRecordNV)
6407
dst.layoutHitObjectShaderRecordNV = true;
6408
}
6409
}
6410
6411
// Do layout error checking given a full variable/block declaration.
6412
void TParseContext::layoutObjectCheck(const TSourceLoc& loc, const TSymbol& symbol)
6413
{
6414
const TType& type = symbol.getType();
6415
const TQualifier& qualifier = type.getQualifier();
6416
6417
// first, cross-check with respect to just the type
6418
layoutTypeCheck(loc, type);
6419
6420
// now, any remaining error checking based on the object itself
6421
6422
if (qualifier.hasAnyLocation()) {
6423
switch (qualifier.storage) {
6424
case EvqUniform:
6425
case EvqBuffer:
6426
if (symbol.getAsVariable() == nullptr)
6427
error(loc, "can only be used on variable declaration", "location", "");
6428
break;
6429
default:
6430
break;
6431
}
6432
}
6433
6434
// user-variable location checks, which are required for SPIR-V in/out:
6435
// - variables have it directly,
6436
// - blocks have it on each member (already enforced), so check first one
6437
if (spvVersion.spv > 0 && !parsingBuiltins && qualifier.builtIn == EbvNone &&
6438
!qualifier.hasLocation() && !intermediate.getAutoMapLocations()) {
6439
6440
switch (qualifier.storage) {
6441
case EvqVaryingIn:
6442
case EvqVaryingOut:
6443
if (!type.getQualifier().isTaskMemory() && !type.getQualifier().hasSpirvDecorate() &&
6444
(type.getBasicType() != EbtBlock ||
6445
(!(*type.getStruct())[0].type->getQualifier().hasLocation() &&
6446
(*type.getStruct())[0].type->getQualifier().builtIn == EbvNone)))
6447
error(loc, "SPIR-V requires location for user input/output", "location", "");
6448
break;
6449
default:
6450
break;
6451
}
6452
}
6453
6454
// Check packing and matrix
6455
if (qualifier.hasUniformLayout()) {
6456
switch (qualifier.storage) {
6457
case EvqUniform:
6458
case EvqBuffer:
6459
if (type.getBasicType() != EbtBlock) {
6460
if (qualifier.hasMatrix())
6461
error(loc, "cannot specify matrix layout on a variable declaration", "layout", "");
6462
if (qualifier.hasPacking())
6463
error(loc, "cannot specify packing on a variable declaration", "layout", "");
6464
// "The offset qualifier can only be used on block members of blocks..."
6465
if (qualifier.hasOffset() && !type.isAtomic())
6466
error(loc, "cannot specify on a variable declaration", "offset", "");
6467
// "The align qualifier can only be used on blocks or block members..."
6468
if (qualifier.hasAlign())
6469
error(loc, "cannot specify on a variable declaration", "align", "");
6470
if (qualifier.isPushConstant())
6471
error(loc, "can only specify on a uniform block", "push_constant", "");
6472
if (qualifier.isShaderRecord())
6473
error(loc, "can only specify on a buffer block", "shaderRecordNV", "");
6474
if (qualifier.hasLocation() && type.isAtomic())
6475
error(loc, "cannot specify on atomic counter", "location", "");
6476
}
6477
break;
6478
default:
6479
// these were already filtered by layoutTypeCheck() (or its callees)
6480
break;
6481
}
6482
}
6483
}
6484
6485
// "For some blocks declared as arrays, the location can only be applied at the block level:
6486
// When a block is declared as an array where additional locations are needed for each member
6487
// for each block array element, it is a compile-time error to specify locations on the block
6488
// members. That is, when locations would be under specified by applying them on block members,
6489
// they are not allowed on block members. For arrayed interfaces (those generally having an
6490
// extra level of arrayness due to interface expansion), the outer array is stripped before
6491
// applying this rule."
6492
void TParseContext::layoutMemberLocationArrayCheck(const TSourceLoc& loc, bool memberWithLocation,
6493
TArraySizes* arraySizes)
6494
{
6495
if (memberWithLocation && arraySizes != nullptr) {
6496
if (arraySizes->getNumDims() > (currentBlockQualifier.isArrayedIo(language) ? 1 : 0))
6497
error(loc, "cannot use in a block array where new locations are needed for each block element",
6498
"location", "");
6499
}
6500
}
6501
6502
// Do layout error checking with respect to a type.
6503
void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
6504
{
6505
const TQualifier& qualifier = type.getQualifier();
6506
6507
// first, intra-layout qualifier-only error checking
6508
layoutQualifierCheck(loc, qualifier);
6509
6510
// now, error checking combining type and qualifier
6511
6512
if (qualifier.hasAnyLocation()) {
6513
if (qualifier.hasLocation()) {
6514
if (qualifier.storage == EvqVaryingOut && language == EShLangFragment) {
6515
if (qualifier.layoutLocation >= (unsigned int)resources.maxDrawBuffers)
6516
error(loc, "too large for fragment output", "location", "");
6517
}
6518
}
6519
if (qualifier.hasComponent()) {
6520
// "It is a compile-time error if this sequence of components gets larger than 3."
6521
if (qualifier.layoutComponent + type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1) > 4)
6522
error(loc, "type overflows the available 4 components", "component", "");
6523
6524
// "It is a compile-time error to apply the component qualifier to a matrix, a structure, a block, or an array containing any of these."
6525
if (type.isMatrix() || type.getBasicType() == EbtBlock || type.getBasicType() == EbtStruct)
6526
error(loc, "cannot apply to a matrix, structure, or block", "component", "");
6527
6528
// " It is a compile-time error to use component 1 or 3 as the beginning of a double or dvec2."
6529
if (type.getBasicType() == EbtDouble)
6530
if (qualifier.layoutComponent & 1)
6531
error(loc, "doubles cannot start on an odd-numbered component", "component", "");
6532
}
6533
6534
switch (qualifier.storage) {
6535
case EvqVaryingIn:
6536
case EvqVaryingOut:
6537
if (type.getBasicType() == EbtBlock)
6538
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "location qualifier on in/out block");
6539
if (type.getQualifier().isTaskMemory())
6540
error(loc, "cannot apply to taskNV in/out blocks", "location", "");
6541
break;
6542
case EvqUniform:
6543
case EvqBuffer:
6544
if (type.getBasicType() == EbtBlock)
6545
error(loc, "cannot apply to uniform or buffer block", "location", "");
6546
else if (type.getBasicType() == EbtSampler && type.getSampler().isAttachmentEXT())
6547
error(loc, "only applies to", "location", "%s with storage tileImageEXT", type.getBasicTypeString().c_str());
6548
break;
6549
case EvqtaskPayloadSharedEXT:
6550
error(loc, "cannot apply to taskPayloadSharedEXT", "location", "");
6551
break;
6552
case EvqPayload:
6553
case EvqPayloadIn:
6554
case EvqHitAttr:
6555
case EvqCallableData:
6556
case EvqCallableDataIn:
6557
case EvqHitObjectAttrNV:
6558
case EvqSpirvStorageClass:
6559
break;
6560
case EvqTileImageEXT:
6561
break;
6562
default:
6563
error(loc, "can only apply to uniform, buffer, in, or out storage qualifiers", "location", "");
6564
break;
6565
}
6566
6567
bool typeCollision;
6568
int repeated = intermediate.addUsedLocation(qualifier, type, typeCollision);
6569
if (repeated >= 0 && ! typeCollision)
6570
error(loc, "overlapping use of location", "location", "%d", repeated);
6571
// When location aliasing, the aliases sharing the location must have the same underlying numerical type and bit width(
6572
// floating - point or integer, 32 - bit versus 64 - bit,etc.)
6573
if (typeCollision && (qualifier.isPipeInput() || qualifier.isPipeOutput() || qualifier.storage == EvqTileImageEXT))
6574
error(loc, "the aliases sharing the location", "location", "%d must be the same basic type and interpolation qualification", repeated);
6575
}
6576
6577
if (qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()) {
6578
if (type.isUnsizedArray()) {
6579
error(loc, "unsized array", "xfb_offset", "in buffer %d", qualifier.layoutXfbBuffer);
6580
} else {
6581
int repeated = intermediate.addXfbBufferOffset(type);
6582
if (repeated >= 0)
6583
error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer);
6584
}
6585
6586
// "The offset must be a multiple of the size of the first component of the first
6587
// qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
6588
// containing a double or 64-bit integer, the offset must also be a multiple of 8..."
6589
if ((type.containsBasicType(EbtDouble) || type.containsBasicType(EbtInt64) || type.containsBasicType(EbtUint64)) &&
6590
! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8))
6591
error(loc, "type contains double or 64-bit integer; xfb_offset must be a multiple of 8", "xfb_offset", "");
6592
else if ((type.containsBasicType(EbtBool) || type.containsBasicType(EbtFloat) ||
6593
type.containsBasicType(EbtInt) || type.containsBasicType(EbtUint)) &&
6594
! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4))
6595
error(loc, "must be a multiple of size of first component", "xfb_offset", "");
6596
// ..., if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2..."
6597
else if ((type.contains16BitFloat() || type.containsBasicType(EbtInt16) || type.containsBasicType(EbtUint16)) &&
6598
!IsMultipleOfPow2(qualifier.layoutXfbOffset, 2))
6599
error(loc, "type contains half float or 16-bit integer; xfb_offset must be a multiple of 2", "xfb_offset", "");
6600
}
6601
if (qualifier.hasXfbStride() && qualifier.hasXfbBuffer()) {
6602
if (! intermediate.setXfbBufferStride(qualifier.layoutXfbBuffer, qualifier.layoutXfbStride))
6603
error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
6604
}
6605
6606
if (qualifier.hasBinding()) {
6607
// Binding checking, from the spec:
6608
//
6609
// "If the binding point for any uniform or shader storage block instance is less than zero, or greater than or
6610
// equal to the implementation-dependent maximum number of uniform buffer bindings, a compile-time
6611
// error will occur. When the binding identifier is used with a uniform or shader storage block instanced as
6612
// an array of size N, all elements of the array from binding through binding + N - 1 must be within this
6613
// range."
6614
//
6615
if (!type.isOpaque() && type.getBasicType() != EbtBlock && type.getBasicType() != EbtSpirvType)
6616
error(loc, "requires block, or sampler/image, or atomic-counter type", "binding", "");
6617
if (type.getBasicType() == EbtSampler) {
6618
int lastBinding = qualifier.layoutBinding;
6619
if (type.isArray()) {
6620
if (spvVersion.vulkan == 0) {
6621
if (type.isSizedArray())
6622
lastBinding += (type.getCumulativeArraySize() - 1);
6623
else {
6624
warn(loc, "assuming binding count of one for compile-time checking of binding numbers for unsized array", "[]", "");
6625
}
6626
}
6627
}
6628
if (spvVersion.vulkan == 0 && lastBinding >= resources.maxCombinedTextureImageUnits)
6629
error(loc, "sampler binding not less than gl_MaxCombinedTextureImageUnits", "binding", type.isArray() ? "(using array)" : "");
6630
}
6631
if (type.isAtomic() && !spvVersion.vulkanRelaxed) {
6632
if (qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
6633
error(loc, "atomic_uint binding is too large; see gl_MaxAtomicCounterBindings", "binding", "");
6634
return;
6635
}
6636
}
6637
} else if (!intermediate.getAutoMapBindings()) {
6638
// some types require bindings
6639
6640
// atomic_uint
6641
if (type.isAtomic())
6642
error(loc, "layout(binding=X) is required", "atomic_uint", "");
6643
6644
// SPIR-V
6645
if (spvVersion.spv > 0) {
6646
if (qualifier.isUniformOrBuffer()) {
6647
if (type.getBasicType() == EbtBlock && !qualifier.isPushConstant() &&
6648
!qualifier.isShaderRecord() &&
6649
!qualifier.hasAttachment() &&
6650
!qualifier.hasBufferReference())
6651
error(loc, "uniform/buffer blocks require layout(binding=X)", "binding", "");
6652
else if (spvVersion.vulkan > 0 && type.getBasicType() == EbtSampler && !type.getSampler().isAttachmentEXT())
6653
error(loc, "sampler/texture/image requires layout(binding=X)", "binding", "");
6654
}
6655
}
6656
}
6657
6658
// some things can't have arrays of arrays
6659
if (type.isArrayOfArrays()) {
6660
if (spvVersion.vulkan > 0) {
6661
if (type.isOpaque() || (type.getQualifier().isUniformOrBuffer() && type.getBasicType() == EbtBlock))
6662
warn(loc, "Generating SPIR-V array-of-arrays, but Vulkan only supports single array level for this resource", "[][]", "");
6663
}
6664
}
6665
6666
// "The offset qualifier can only be used on block members of blocks..."
6667
if (qualifier.hasOffset()) {
6668
if (type.getBasicType() == EbtBlock)
6669
error(loc, "only applies to block members, not blocks", "offset", "");
6670
}
6671
6672
// Image format
6673
if (qualifier.hasFormat()) {
6674
if (! type.isImage() && !intermediate.getBindlessImageMode())
6675
error(loc, "only apply to images", TQualifier::getLayoutFormatString(qualifier.getFormat()), "");
6676
else {
6677
if (type.getSampler().type == EbtFloat && qualifier.getFormat() > ElfFloatGuard)
6678
error(loc, "does not apply to floating point images", TQualifier::getLayoutFormatString(qualifier.getFormat()), "");
6679
if (type.getSampler().type == EbtInt && (qualifier.getFormat() < ElfFloatGuard || qualifier.getFormat() > ElfIntGuard))
6680
error(loc, "does not apply to signed integer images", TQualifier::getLayoutFormatString(qualifier.getFormat()), "");
6681
if (type.getSampler().type == EbtUint && qualifier.getFormat() < ElfIntGuard)
6682
error(loc, "does not apply to unsigned integer images", TQualifier::getLayoutFormatString(qualifier.getFormat()), "");
6683
6684
if (isEsProfile()) {
6685
// "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must
6686
// specify either memory qualifier readonly or the memory qualifier writeonly."
6687
if (! (qualifier.getFormat() == ElfR32f || qualifier.getFormat() == ElfR32i || qualifier.getFormat() == ElfR32ui)) {
6688
if (! qualifier.isReadOnly() && ! qualifier.isWriteOnly())
6689
error(loc, "format requires readonly or writeonly memory qualifier", TQualifier::getLayoutFormatString(qualifier.getFormat()), "");
6690
}
6691
}
6692
}
6693
} else if (type.isImage() && ! qualifier.isWriteOnly() && !intermediate.getBindlessImageMode()) {
6694
const char *explanation = "image variables not declared 'writeonly' and without a format layout qualifier";
6695
requireProfile(loc, ECoreProfile | ECompatibilityProfile, explanation);
6696
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shader_image_load_formatted, explanation);
6697
}
6698
6699
if (qualifier.isPushConstant()) {
6700
if (type.getBasicType() != EbtBlock)
6701
error(loc, "can only be used with a block", "push_constant", "");
6702
if (type.isArray())
6703
error(loc, "Push constants blocks can't be an array", "push_constant", "");
6704
}
6705
6706
if (qualifier.hasBufferReference() && type.getBasicType() != EbtBlock)
6707
error(loc, "can only be used with a block", "buffer_reference", "");
6708
6709
if (qualifier.isShaderRecord() && type.getBasicType() != EbtBlock)
6710
error(loc, "can only be used with a block", "shaderRecordNV", "");
6711
6712
// input attachment
6713
if (type.isSubpass()) {
6714
if (extensionTurnedOn(E_GL_EXT_shader_tile_image))
6715
error(loc, "can not be used with GL_EXT_shader_tile_image enabled", type.getSampler().getString().c_str(), "");
6716
if (! qualifier.hasAttachment())
6717
error(loc, "requires an input_attachment_index layout qualifier", "subpass", "");
6718
} else {
6719
if (qualifier.hasAttachment())
6720
error(loc, "can only be used with a subpass", "input_attachment_index", "");
6721
}
6722
6723
// specialization-constant id
6724
if (qualifier.hasSpecConstantId()) {
6725
if (type.getQualifier().storage != EvqConst)
6726
error(loc, "can only be applied to 'const'-qualified scalar", "constant_id", "");
6727
if (! type.isScalar())
6728
error(loc, "can only be applied to a scalar", "constant_id", "");
6729
switch (type.getBasicType())
6730
{
6731
case EbtInt8:
6732
case EbtUint8:
6733
case EbtInt16:
6734
case EbtUint16:
6735
case EbtInt:
6736
case EbtUint:
6737
case EbtInt64:
6738
case EbtUint64:
6739
case EbtBool:
6740
case EbtFloat:
6741
case EbtDouble:
6742
case EbtFloat16:
6743
break;
6744
default:
6745
error(loc, "cannot be applied to this type", "constant_id", "");
6746
break;
6747
}
6748
}
6749
}
6750
6751
static bool storageCanHaveLayoutInBlock(const enum TStorageQualifier storage)
6752
{
6753
switch (storage) {
6754
case EvqUniform:
6755
case EvqBuffer:
6756
case EvqShared:
6757
return true;
6758
default:
6759
return false;
6760
}
6761
}
6762
6763
// Do layout error checking that can be done within a layout qualifier proper, not needing to know
6764
// if there are blocks, atomic counters, variables, etc.
6765
void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier)
6766
{
6767
if (qualifier.storage == EvqShared && qualifier.hasLayout()) {
6768
if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4) {
6769
error(loc, "shared block requires at least SPIR-V 1.4", "shared block", "");
6770
}
6771
profileRequires(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shared_memory_block, "shared block");
6772
}
6773
6774
// "It is a compile-time error to use *component* without also specifying the location qualifier (order does not matter)."
6775
if (qualifier.hasComponent() && ! qualifier.hasLocation())
6776
error(loc, "must specify 'location' to use 'component'", "component", "");
6777
6778
if (qualifier.hasAnyLocation()) {
6779
6780
// "As with input layout qualifiers, all shaders except compute shaders
6781
// allow *location* layout qualifiers on output variable declarations,
6782
// output block declarations, and output block member declarations."
6783
6784
switch (qualifier.storage) {
6785
case EvqVaryingIn:
6786
{
6787
const char* feature = "location qualifier on input";
6788
if (isEsProfile() && version < 310)
6789
requireStage(loc, EShLangVertex, feature);
6790
else
6791
requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature);
6792
if (language == EShLangVertex) {
6793
const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
6794
profileRequires(loc, ~EEsProfile, 330, 2, exts, feature);
6795
profileRequires(loc, EEsProfile, 300, nullptr, feature);
6796
} else {
6797
profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
6798
profileRequires(loc, EEsProfile, 310, nullptr, feature);
6799
}
6800
break;
6801
}
6802
case EvqVaryingOut:
6803
{
6804
const char* feature = "location qualifier on output";
6805
if (isEsProfile() && version < 310)
6806
requireStage(loc, EShLangFragment, feature);
6807
else
6808
requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature);
6809
if (language == EShLangFragment) {
6810
const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
6811
profileRequires(loc, ~EEsProfile, 330, 2, exts, feature);
6812
profileRequires(loc, EEsProfile, 300, nullptr, feature);
6813
} else {
6814
profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
6815
profileRequires(loc, EEsProfile, 310, nullptr, feature);
6816
}
6817
break;
6818
}
6819
case EvqUniform:
6820
case EvqBuffer:
6821
{
6822
const char* feature = "location qualifier on uniform or buffer";
6823
requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile | ENoProfile, feature);
6824
profileRequires(loc, ~EEsProfile, 330, E_GL_ARB_explicit_attrib_location, feature);
6825
profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_explicit_uniform_location, feature);
6826
profileRequires(loc, EEsProfile, 310, nullptr, feature);
6827
break;
6828
}
6829
default:
6830
break;
6831
}
6832
if (qualifier.hasIndex()) {
6833
if (qualifier.storage != EvqVaryingOut)
6834
error(loc, "can only be used on an output", "index", "");
6835
if (! qualifier.hasLocation())
6836
error(loc, "can only be used with an explicit location", "index", "");
6837
}
6838
}
6839
6840
if (qualifier.hasBinding()) {
6841
if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory())
6842
error(loc, "requires uniform or buffer storage qualifier", "binding", "");
6843
}
6844
if (qualifier.hasStream()) {
6845
if (!qualifier.isPipeOutput())
6846
error(loc, "can only be used on an output", "stream", "");
6847
}
6848
if (qualifier.hasXfb()) {
6849
if (!qualifier.isPipeOutput())
6850
error(loc, "can only be used on an output", "xfb layout qualifier", "");
6851
}
6852
if (qualifier.hasUniformLayout()) {
6853
if (!storageCanHaveLayoutInBlock(qualifier.storage) && !qualifier.isTaskMemory()) {
6854
if (qualifier.hasMatrix() || qualifier.hasPacking())
6855
error(loc, "matrix or packing qualifiers can only be used on a uniform or buffer", "layout", "");
6856
if (qualifier.hasOffset() || qualifier.hasAlign())
6857
error(loc, "offset/align can only be used on a uniform or buffer", "layout", "");
6858
}
6859
}
6860
if (qualifier.isPushConstant()) {
6861
if (qualifier.storage != EvqUniform)
6862
error(loc, "can only be used with a uniform", "push_constant", "");
6863
if (qualifier.hasSet())
6864
error(loc, "cannot be used with push_constant", "set", "");
6865
if (qualifier.hasBinding())
6866
error(loc, "cannot be used with push_constant", "binding", "");
6867
}
6868
if (qualifier.hasBufferReference()) {
6869
if (qualifier.storage != EvqBuffer)
6870
error(loc, "can only be used with buffer", "buffer_reference", "");
6871
}
6872
if (qualifier.isShaderRecord()) {
6873
if (qualifier.storage != EvqBuffer)
6874
error(loc, "can only be used with a buffer", "shaderRecordNV", "");
6875
if (qualifier.hasBinding())
6876
error(loc, "cannot be used with shaderRecordNV", "binding", "");
6877
if (qualifier.hasSet())
6878
error(loc, "cannot be used with shaderRecordNV", "set", "");
6879
6880
}
6881
6882
if (qualifier.storage == EvqTileImageEXT) {
6883
if (qualifier.hasSet())
6884
error(loc, "cannot be used with tileImageEXT", "set", "");
6885
if (!qualifier.hasLocation())
6886
error(loc, "can only be used with an explicit location", "tileImageEXT", "");
6887
}
6888
6889
if (qualifier.storage == EvqHitAttr && qualifier.hasLayout()) {
6890
error(loc, "cannot apply layout qualifiers to hitAttributeNV variable", "hitAttributeNV", "");
6891
}
6892
}
6893
6894
// For places that can't have shader-level layout qualifiers
6895
void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQualifiers& shaderQualifiers)
6896
{
6897
const char* message = "can only apply to a standalone qualifier";
6898
6899
if (shaderQualifiers.geometry != ElgNone)
6900
error(loc, message, TQualifier::getGeometryString(shaderQualifiers.geometry), "");
6901
if (shaderQualifiers.spacing != EvsNone)
6902
error(loc, message, TQualifier::getVertexSpacingString(shaderQualifiers.spacing), "");
6903
if (shaderQualifiers.order != EvoNone)
6904
error(loc, message, TQualifier::getVertexOrderString(shaderQualifiers.order), "");
6905
if (shaderQualifiers.pointMode)
6906
error(loc, message, "point_mode", "");
6907
if (shaderQualifiers.invocations != TQualifier::layoutNotSet)
6908
error(loc, message, "invocations", "");
6909
for (int i = 0; i < 3; ++i) {
6910
if (shaderQualifiers.localSize[i] > 1)
6911
error(loc, message, "local_size", "");
6912
if (shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet)
6913
error(loc, message, "local_size id", "");
6914
}
6915
if (shaderQualifiers.vertices != TQualifier::layoutNotSet) {
6916
if (language == EShLangGeometry || language == EShLangMesh)
6917
error(loc, message, "max_vertices", "");
6918
else if (language == EShLangTessControl)
6919
error(loc, message, "vertices", "");
6920
else
6921
assert(0);
6922
}
6923
if (shaderQualifiers.earlyFragmentTests)
6924
error(loc, message, "early_fragment_tests", "");
6925
if (shaderQualifiers.postDepthCoverage)
6926
error(loc, message, "post_depth_coverage", "");
6927
if (shaderQualifiers.nonCoherentColorAttachmentReadEXT)
6928
error(loc, message, "non_coherent_color_attachment_readEXT", "");
6929
if (shaderQualifiers.nonCoherentDepthAttachmentReadEXT)
6930
error(loc, message, "non_coherent_depth_attachment_readEXT", "");
6931
if (shaderQualifiers.nonCoherentStencilAttachmentReadEXT)
6932
error(loc, message, "non_coherent_stencil_attachment_readEXT", "");
6933
if (shaderQualifiers.primitives != TQualifier::layoutNotSet) {
6934
if (language == EShLangMesh)
6935
error(loc, message, "max_primitives", "");
6936
else
6937
assert(0);
6938
}
6939
if (shaderQualifiers.hasBlendEquation())
6940
error(loc, message, "blend equation", "");
6941
if (shaderQualifiers.numViews != TQualifier::layoutNotSet)
6942
error(loc, message, "num_views", "");
6943
if (shaderQualifiers.interlockOrdering != EioNone)
6944
error(loc, message, TQualifier::getInterlockOrderingString(shaderQualifiers.interlockOrdering), "");
6945
if (shaderQualifiers.layoutPrimitiveCulling)
6946
error(loc, "can only be applied as standalone", "primitive_culling", "");
6947
}
6948
6949
// Correct and/or advance an object's offset layout qualifier.
6950
void TParseContext::fixOffset(const TSourceLoc& loc, TSymbol& symbol)
6951
{
6952
const TQualifier& qualifier = symbol.getType().getQualifier();
6953
if (symbol.getType().isAtomic()) {
6954
if (qualifier.hasBinding() && (int)qualifier.layoutBinding < resources.maxAtomicCounterBindings) {
6955
6956
// Set the offset
6957
int offset;
6958
if (qualifier.hasOffset())
6959
offset = qualifier.layoutOffset;
6960
else
6961
offset = atomicUintOffsets[qualifier.layoutBinding];
6962
6963
if (offset % 4 != 0)
6964
error(loc, "atomic counters offset should align based on 4:", "offset", "%d", offset);
6965
6966
symbol.getWritableType().getQualifier().layoutOffset = offset;
6967
6968
// Check for overlap
6969
int numOffsets = 4;
6970
if (symbol.getType().isArray()) {
6971
if (symbol.getType().isSizedArray() && !symbol.getType().getArraySizes()->isInnerUnsized())
6972
numOffsets *= symbol.getType().getCumulativeArraySize();
6973
else {
6974
// "It is a compile-time error to declare an unsized array of atomic_uint."
6975
error(loc, "array must be explicitly sized", "atomic_uint", "");
6976
}
6977
}
6978
int repeated = intermediate.addUsedOffsets(qualifier.layoutBinding, offset, numOffsets);
6979
if (repeated >= 0)
6980
error(loc, "atomic counters sharing the same offset:", "offset", "%d", repeated);
6981
6982
// Bump the default offset
6983
atomicUintOffsets[qualifier.layoutBinding] = offset + numOffsets;
6984
}
6985
}
6986
}
6987
6988
//
6989
// Look up a function name in the symbol table, and make sure it is a function.
6990
//
6991
// Return the function symbol if found, otherwise nullptr.
6992
//
6993
const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
6994
{
6995
if (symbolTable.isFunctionNameVariable(call.getName())) {
6996
error(loc, "can't use function syntax on variable", call.getName().c_str(), "");
6997
return nullptr;
6998
}
6999
7000
const TFunction* function = nullptr;
7001
7002
// debugPrintfEXT has var args and is in the symbol table as "debugPrintfEXT()",
7003
// mangled to "debugPrintfEXT("
7004
if (call.getName() == "debugPrintfEXT") {
7005
TSymbol* symbol = symbolTable.find("debugPrintfEXT(", &builtIn);
7006
if (symbol)
7007
return symbol->getAsFunction();
7008
}
7009
7010
bool explicitTypesEnabled = extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
7011
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
7012
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
7013
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
7014
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
7015
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
7016
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
7017
extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64);
7018
7019
if (isEsProfile())
7020
function = (explicitTypesEnabled && version >= 310)
7021
? findFunctionExplicitTypes(loc, call, builtIn)
7022
: ((extensionTurnedOn(E_GL_EXT_shader_implicit_conversions) && version >= 310)
7023
? findFunction120(loc, call, builtIn)
7024
: findFunctionExact(loc, call, builtIn));
7025
else if (version < 120)
7026
function = findFunctionExact(loc, call, builtIn);
7027
else if (version < 400) {
7028
bool needfindFunction400 = extensionTurnedOn(E_GL_ARB_gpu_shader_fp64) || extensionTurnedOn(E_GL_ARB_gpu_shader5);
7029
function = needfindFunction400 ? findFunction400(loc, call, builtIn) : findFunction120(loc, call, builtIn);
7030
}
7031
else if (explicitTypesEnabled)
7032
function = findFunctionExplicitTypes(loc, call, builtIn);
7033
else
7034
function = findFunction400(loc, call, builtIn);
7035
7036
return function;
7037
}
7038
7039
// Function finding algorithm for ES and desktop 110.
7040
const TFunction* TParseContext::findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
7041
{
7042
TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
7043
if (symbol == nullptr) {
7044
error(loc, "no matching overloaded function found", call.getName().c_str(), "");
7045
7046
return nullptr;
7047
}
7048
7049
return symbol->getAsFunction();
7050
}
7051
7052
// Function finding algorithm for desktop versions 120 through 330.
7053
const TFunction* TParseContext::findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
7054
{
7055
// first, look for an exact match
7056
TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
7057
if (symbol)
7058
return symbol->getAsFunction();
7059
7060
// exact match not found, look through a list of overloaded functions of the same name
7061
7062
// "If no exact match is found, then [implicit conversions] will be applied to find a match. Mismatched types
7063
// on input parameters (in or inout or default) must have a conversion from the calling argument type to the
7064
// formal parameter type. Mismatched types on output parameters (out or inout) must have a conversion
7065
// from the formal parameter type to the calling argument type. When argument conversions are used to find
7066
// a match, it is a semantic error if there are multiple ways to apply these conversions to make the call match
7067
// more than one function."
7068
7069
const TFunction* candidate = nullptr;
7070
TVector<const TFunction*> candidateList;
7071
symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
7072
7073
for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
7074
const TFunction& function = *(*it);
7075
7076
// to even be a potential match, number of arguments has to match
7077
if (call.getParamCount() != function.getParamCount())
7078
continue;
7079
7080
bool possibleMatch = true;
7081
for (int i = 0; i < function.getParamCount(); ++i) {
7082
// same types is easy
7083
if (*function[i].type == *call[i].type)
7084
continue;
7085
7086
// We have a mismatch in type, see if it is implicitly convertible
7087
7088
if (function[i].type->isArray() || call[i].type->isArray() ||
7089
! function[i].type->sameElementShape(*call[i].type))
7090
possibleMatch = false;
7091
else {
7092
// do direction-specific checks for conversion of basic type
7093
if (function[i].type->getQualifier().isParamInput()) {
7094
if (! intermediate.canImplicitlyPromote(call[i].type->getBasicType(), function[i].type->getBasicType()))
7095
possibleMatch = false;
7096
}
7097
if (function[i].type->getQualifier().isParamOutput()) {
7098
if (! intermediate.canImplicitlyPromote(function[i].type->getBasicType(), call[i].type->getBasicType()))
7099
possibleMatch = false;
7100
}
7101
}
7102
if (! possibleMatch)
7103
break;
7104
}
7105
if (possibleMatch) {
7106
if (candidate) {
7107
// our second match, meaning ambiguity
7108
error(loc, "ambiguous function signature match: multiple signatures match under implicit type conversion", call.getName().c_str(), "");
7109
} else
7110
candidate = &function;
7111
}
7112
}
7113
7114
if (candidate == nullptr)
7115
error(loc, "no matching overloaded function found", call.getName().c_str(), "");
7116
7117
return candidate;
7118
}
7119
7120
// Function finding algorithm for desktop version 400 and above.
7121
//
7122
// "When function calls are resolved, an exact type match for all the arguments
7123
// is sought. If an exact match is found, all other functions are ignored, and
7124
// the exact match is used. If no exact match is found, then the implicit
7125
// conversions in section 4.1.10 Implicit Conversions will be applied to find
7126
// a match. Mismatched types on input parameters (in or inout or default) must
7127
// have a conversion from the calling argument type to the formal parameter type.
7128
// Mismatched types on output parameters (out or inout) must have a conversion
7129
// from the formal parameter type to the calling argument type.
7130
//
7131
// "If implicit conversions can be used to find more than one matching function,
7132
// a single best-matching function is sought. To determine a best match, the
7133
// conversions between calling argument and formal parameter types are compared
7134
// for each function argument and pair of matching functions. After these
7135
// comparisons are performed, each pair of matching functions are compared.
7136
// A function declaration A is considered a better match than function
7137
// declaration B if
7138
//
7139
// * for at least one function argument, the conversion for that argument in A
7140
// is better than the corresponding conversion in B; and
7141
// * there is no function argument for which the conversion in B is better than
7142
// the corresponding conversion in A.
7143
//
7144
// "If a single function declaration is considered a better match than every
7145
// other matching function declaration, it will be used. Otherwise, a
7146
// compile-time semantic error for an ambiguous overloaded function call occurs.
7147
//
7148
// "To determine whether the conversion for a single argument in one match is
7149
// better than that for another match, the following rules are applied, in order:
7150
//
7151
// 1. An exact match is better than a match involving any implicit conversion.
7152
// 2. A match involving an implicit conversion from float to double is better
7153
// than a match involving any other implicit conversion.
7154
// 3. A match involving an implicit conversion from either int or uint to float
7155
// is better than a match involving an implicit conversion from either int
7156
// or uint to double.
7157
//
7158
// "If none of the rules above apply to a particular pair of conversions, neither
7159
// conversion is considered better than the other."
7160
//
7161
const TFunction* TParseContext::findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
7162
{
7163
// first, look for an exact match
7164
TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
7165
if (symbol)
7166
return symbol->getAsFunction();
7167
7168
// no exact match, use the generic selector, parameterized by the GLSL rules
7169
7170
// create list of candidates to send
7171
TVector<const TFunction*> candidateList;
7172
symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
7173
7174
// can 'from' convert to 'to'?
7175
const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
7176
if (from == to)
7177
return true;
7178
if (from.coopMatParameterOK(to))
7179
return true;
7180
// Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
7181
if (builtIn && from.isArray() && to.isUnsizedArray()) {
7182
TType fromElementType(from, 0);
7183
TType toElementType(to, 0);
7184
if (fromElementType == toElementType)
7185
return true;
7186
}
7187
if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
7188
return false;
7189
if (from.isCoopMat() && to.isCoopMat())
7190
return from.sameCoopMatBaseType(to);
7191
return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
7192
};
7193
7194
// Is 'to2' a better conversion than 'to1'?
7195
// Ties should not be considered as better.
7196
// Assumes 'convertible' already said true.
7197
const auto better = [](const TType& from, const TType& to1, const TType& to2) -> bool {
7198
// 1. exact match
7199
if (from == to2)
7200
return from != to1;
7201
if (from == to1)
7202
return false;
7203
7204
// 2. float -> double is better
7205
if (from.getBasicType() == EbtFloat) {
7206
if (to2.getBasicType() == EbtDouble && to1.getBasicType() != EbtDouble)
7207
return true;
7208
}
7209
7210
// 3. -> float is better than -> double
7211
return to2.getBasicType() == EbtFloat && to1.getBasicType() == EbtDouble;
7212
};
7213
7214
// for ambiguity reporting
7215
bool tie = false;
7216
7217
// send to the generic selector
7218
const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
7219
7220
if (bestMatch == nullptr)
7221
error(loc, "no matching overloaded function found", call.getName().c_str(), "");
7222
else if (tie)
7223
error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
7224
7225
return bestMatch;
7226
}
7227
7228
// "To determine whether the conversion for a single argument in one match
7229
// is better than that for another match, the conversion is assigned of the
7230
// three ranks ordered from best to worst:
7231
// 1. Exact match: no conversion.
7232
// 2. Promotion: integral or floating-point promotion.
7233
// 3. Conversion: integral conversion, floating-point conversion,
7234
// floating-integral conversion.
7235
// A conversion C1 is better than a conversion C2 if the rank of C1 is
7236
// better than the rank of C2."
7237
const TFunction* TParseContext::findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
7238
{
7239
// first, look for an exact match
7240
TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
7241
if (symbol)
7242
return symbol->getAsFunction();
7243
7244
// no exact match, use the generic selector, parameterized by the GLSL rules
7245
7246
// create list of candidates to send
7247
TVector<const TFunction*> candidateList;
7248
symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
7249
7250
// can 'from' convert to 'to'?
7251
const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
7252
if (from == to)
7253
return true;
7254
if (from.coopMatParameterOK(to))
7255
return true;
7256
// Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
7257
if (builtIn && from.isArray() && to.isUnsizedArray()) {
7258
TType fromElementType(from, 0);
7259
TType toElementType(to, 0);
7260
if (fromElementType == toElementType)
7261
return true;
7262
}
7263
if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
7264
return false;
7265
if (from.isCoopMat() && to.isCoopMat())
7266
return from.sameCoopMatBaseType(to);
7267
return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
7268
};
7269
7270
// Is 'to2' a better conversion than 'to1'?
7271
// Ties should not be considered as better.
7272
// Assumes 'convertible' already said true.
7273
const auto better = [this](const TType& from, const TType& to1, const TType& to2) -> bool {
7274
// 1. exact match
7275
if (from == to2)
7276
return from != to1;
7277
if (from == to1)
7278
return false;
7279
7280
// 2. Promotion (integral, floating-point) is better
7281
TBasicType from_type = from.getBasicType();
7282
TBasicType to1_type = to1.getBasicType();
7283
TBasicType to2_type = to2.getBasicType();
7284
bool isPromotion1 = (intermediate.isIntegralPromotion(from_type, to1_type) ||
7285
intermediate.isFPPromotion(from_type, to1_type));
7286
bool isPromotion2 = (intermediate.isIntegralPromotion(from_type, to2_type) ||
7287
intermediate.isFPPromotion(from_type, to2_type));
7288
if (isPromotion2)
7289
return !isPromotion1;
7290
if(isPromotion1)
7291
return false;
7292
7293
// 3. Conversion (integral, floating-point , floating-integral)
7294
bool isConversion1 = (intermediate.isIntegralConversion(from_type, to1_type) ||
7295
intermediate.isFPConversion(from_type, to1_type) ||
7296
intermediate.isFPIntegralConversion(from_type, to1_type));
7297
bool isConversion2 = (intermediate.isIntegralConversion(from_type, to2_type) ||
7298
intermediate.isFPConversion(from_type, to2_type) ||
7299
intermediate.isFPIntegralConversion(from_type, to2_type));
7300
7301
return isConversion2 && !isConversion1;
7302
};
7303
7304
// for ambiguity reporting
7305
bool tie = false;
7306
7307
// send to the generic selector
7308
const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
7309
7310
if (bestMatch == nullptr)
7311
error(loc, "no matching overloaded function found", call.getName().c_str(), "");
7312
else if (tie)
7313
error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
7314
7315
return bestMatch;
7316
}
7317
7318
//
7319
// Adjust function calls that aren't declared in Vulkan to a
7320
// calls with equivalent effects
7321
//
7322
TIntermTyped* TParseContext::vkRelaxedRemapFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments)
7323
{
7324
TIntermTyped* result = nullptr;
7325
7326
if (function->getBuiltInOp() != EOpNull) {
7327
return nullptr;
7328
}
7329
7330
if (function->getName() == "atomicCounterIncrement") {
7331
// change atomicCounterIncrement into an atomicAdd of 1
7332
TString name("atomicAdd");
7333
TType uintType(EbtUint);
7334
7335
TFunction realFunc(&name, function->getType());
7336
7337
// Use copyParam to avoid shared ownership of the 'type' field
7338
// of the parameter.
7339
for (int i = 0; i < function->getParamCount(); ++i) {
7340
realFunc.addParameter(TParameter().copyParam((*function)[i]));
7341
}
7342
7343
TParameter tmpP = { nullptr, &uintType, {} };
7344
realFunc.addParameter(TParameter().copyParam(tmpP));
7345
arguments = intermediate.growAggregate(arguments, intermediate.addConstantUnion(1, loc, true));
7346
7347
result = handleFunctionCall(loc, &realFunc, arguments);
7348
} else if (function->getName() == "atomicCounterDecrement") {
7349
// change atomicCounterDecrement into an atomicAdd with -1
7350
// and subtract 1 from result, to return post-decrement value
7351
TString name("atomicAdd");
7352
TType uintType(EbtUint);
7353
7354
TFunction realFunc(&name, function->getType());
7355
7356
for (int i = 0; i < function->getParamCount(); ++i) {
7357
realFunc.addParameter(TParameter().copyParam((*function)[i]));
7358
}
7359
7360
TParameter tmpP = { nullptr, &uintType, {} };
7361
realFunc.addParameter(TParameter().copyParam(tmpP));
7362
arguments = intermediate.growAggregate(arguments, intermediate.addConstantUnion(-1, loc, true));
7363
7364
result = handleFunctionCall(loc, &realFunc, arguments);
7365
7366
// post decrement, so that it matches AtomicCounterDecrement semantics
7367
if (result) {
7368
result = handleBinaryMath(loc, "-", EOpSub, result, intermediate.addConstantUnion(1, loc, true));
7369
}
7370
} else if (function->getName() == "atomicCounter") {
7371
// change atomicCounter into a direct read of the variable
7372
if (arguments->getAsTyped()) {
7373
result = arguments->getAsTyped();
7374
}
7375
}
7376
7377
return result;
7378
}
7379
7380
// When a declaration includes a type, but not a variable name, it can be used
7381
// to establish defaults.
7382
void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType& publicType)
7383
{
7384
if (publicType.basicType == EbtAtomicUint && publicType.qualifier.hasBinding()) {
7385
if (publicType.qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
7386
error(loc, "atomic_uint binding is too large", "binding", "");
7387
return;
7388
}
7389
if (publicType.qualifier.hasOffset())
7390
atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset;
7391
return;
7392
}
7393
7394
if (publicType.arraySizes) {
7395
error(loc, "expect an array name", "", "");
7396
}
7397
7398
if (publicType.qualifier.hasLayout() && !publicType.qualifier.hasBufferReference())
7399
warn(loc, "useless application of layout qualifier", "layout", "");
7400
}
7401
7402
void TParseContext::coopMatTypeParametersCheck(const TSourceLoc& loc, const TPublicType& publicType)
7403
{
7404
if (parsingBuiltins)
7405
return;
7406
if (publicType.isCoopmatKHR()) {
7407
if (publicType.typeParameters == nullptr) {
7408
error(loc, "coopmat missing type parameters", "", "");
7409
return;
7410
}
7411
switch (publicType.typeParameters->basicType) {
7412
case EbtFloat:
7413
case EbtFloat16:
7414
case EbtInt:
7415
case EbtInt8:
7416
case EbtInt16:
7417
case EbtUint:
7418
case EbtUint8:
7419
case EbtUint16:
7420
case EbtSpirvType:
7421
break;
7422
default:
7423
error(loc, "coopmat invalid basic type", TType::getBasicString(publicType.typeParameters->basicType), "");
7424
break;
7425
}
7426
if (publicType.typeParameters->arraySizes->getNumDims() != 4) {
7427
error(loc, "coopmat incorrect number of type parameters", "", "");
7428
return;
7429
}
7430
int use = publicType.typeParameters->arraySizes->getDimSize(3);
7431
if (use < 0 || use > 2) {
7432
error(loc, "coopmat invalid matrix Use", "", "");
7433
return;
7434
}
7435
}
7436
}
7437
7438
bool TParseContext::vkRelaxedRemapUniformVariable(const TSourceLoc& loc, TString& identifier, const TPublicType& publicType,
7439
TArraySizes*, TIntermTyped* initializer, TType& type)
7440
{
7441
vkRelaxedRemapUniformMembers(loc, publicType, type, identifier);
7442
7443
if (parsingBuiltins || symbolTable.atBuiltInLevel() || !symbolTable.atGlobalLevel() ||
7444
type.getQualifier().storage != EvqUniform ||
7445
!(type.containsNonOpaque() || type.getBasicType() == EbtAtomicUint || (type.containsSampler() && type.isStruct()))) {
7446
return false;
7447
}
7448
7449
if (type.getQualifier().hasLocation()) {
7450
warn(loc, "ignoring layout qualifier for uniform", identifier.c_str(), "location");
7451
type.getQualifier().layoutLocation = TQualifier::layoutLocationEnd;
7452
}
7453
7454
if (initializer) {
7455
warn(loc, "Ignoring initializer for uniform", identifier.c_str(), "");
7456
initializer = nullptr;
7457
}
7458
7459
if (type.isArray()) {
7460
// do array size checks here
7461
arraySizesCheck(loc, type.getQualifier(), type.getArraySizes(), initializer, false);
7462
7463
if (arrayQualifierError(loc, type.getQualifier()) || arrayError(loc, type)) {
7464
error(loc, "array param error", identifier.c_str(), "");
7465
}
7466
}
7467
7468
// do some checking on the type as it was declared
7469
layoutTypeCheck(loc, type);
7470
7471
int bufferBinding = TQualifier::layoutBindingEnd;
7472
TVariable* updatedBlock = nullptr;
7473
7474
// Convert atomic_uint into members of a buffer block
7475
if (type.isAtomic()) {
7476
type.setBasicType(EbtUint);
7477
type.getQualifier().storage = EvqBuffer;
7478
7479
type.getQualifier().volatil = true;
7480
type.getQualifier().coherent = true;
7481
7482
// xxTODO: use logic from fixOffset() to apply explicit member offset
7483
bufferBinding = type.getQualifier().layoutBinding;
7484
type.getQualifier().layoutBinding = TQualifier::layoutBindingEnd;
7485
type.getQualifier().explicitOffset = false;
7486
growAtomicCounterBlock(bufferBinding, loc, type, identifier, nullptr);
7487
updatedBlock = atomicCounterBuffers[bufferBinding];
7488
}
7489
7490
if (!updatedBlock) {
7491
growGlobalUniformBlock(loc, type, identifier, nullptr);
7492
updatedBlock = globalUniformBlock;
7493
}
7494
7495
//
7496
// don't assign explicit member offsets here
7497
// if any are assigned, need to be updated here and in the merge/link step
7498
// fixBlockUniformOffsets(updatedBlock->getWritableType().getQualifier(), *updatedBlock->getWritableType().getWritableStruct());
7499
7500
// checks on update buffer object
7501
layoutObjectCheck(loc, *updatedBlock);
7502
7503
TSymbol* symbol = symbolTable.find(identifier);
7504
7505
if (!symbol) {
7506
if (updatedBlock == globalUniformBlock)
7507
error(loc, "error adding uniform to default uniform block", identifier.c_str(), "");
7508
else
7509
error(loc, "error adding atomic counter to atomic counter block", identifier.c_str(), "");
7510
return false;
7511
}
7512
7513
// merge qualifiers
7514
mergeObjectLayoutQualifiers(updatedBlock->getWritableType().getQualifier(), type.getQualifier(), true);
7515
7516
return true;
7517
}
7518
7519
template <typename Function>
7520
static void ForEachOpaque(const TType& type, const TString& path, Function callback)
7521
{
7522
auto recursion = [&callback](const TType& type, const TString& path, bool skipArray, auto& recursion) -> void {
7523
if (!skipArray && type.isArray())
7524
{
7525
std::vector<int> indices(type.getArraySizes()->getNumDims());
7526
for (int flatIndex = 0;
7527
flatIndex < type.getArraySizes()->getCumulativeSize();
7528
++flatIndex)
7529
{
7530
TString subscriptPath = path;
7531
for (size_t dimIndex = 0; dimIndex < indices.size(); ++dimIndex)
7532
{
7533
int index = indices[dimIndex];
7534
subscriptPath.append("[");
7535
subscriptPath.append(String(index));
7536
subscriptPath.append("]");
7537
}
7538
7539
recursion(type, subscriptPath, true, recursion);
7540
7541
for (size_t dimIndex = 0; dimIndex < indices.size(); ++dimIndex)
7542
{
7543
++indices[dimIndex];
7544
if (indices[dimIndex] < type.getArraySizes()->getDimSize(dimIndex))
7545
break;
7546
else
7547
indices[dimIndex] = 0;
7548
}
7549
}
7550
}
7551
7552
else if (type.isStruct() && type.containsOpaque())
7553
{
7554
const TTypeList& types = *type.getStruct();
7555
for (const TTypeLoc& typeLoc : types)
7556
{
7557
TString nextPath = path;
7558
nextPath.append(".");
7559
nextPath.append(typeLoc.type->getFieldName());
7560
7561
recursion(*(typeLoc.type), nextPath, false, recursion);
7562
}
7563
}
7564
7565
else if (type.isOpaque())
7566
{
7567
callback(type, path);
7568
}
7569
};
7570
7571
recursion(type, path, false, recursion);
7572
}
7573
7574
void TParseContext::vkRelaxedRemapUniformMembers(const TSourceLoc& loc, const TPublicType& publicType, const TType& type,
7575
const TString& identifier)
7576
{
7577
if (!type.isStruct() || !type.containsOpaque())
7578
return;
7579
7580
ForEachOpaque(type, identifier,
7581
[&publicType, &loc, this](const TType& type, const TString& path) {
7582
TArraySizes arraySizes = {};
7583
if (type.getArraySizes()) arraySizes = *type.getArraySizes();
7584
TTypeParameters typeParameters = {};
7585
if (type.getTypeParameters()) typeParameters = *type.getTypeParameters();
7586
7587
TPublicType memberType{};
7588
memberType.basicType = type.getBasicType();
7589
memberType.sampler = type.getSampler();
7590
memberType.qualifier = type.getQualifier();
7591
memberType.vectorSize = type.getVectorSize();
7592
memberType.matrixCols = type.getMatrixCols();
7593
memberType.matrixRows = type.getMatrixRows();
7594
memberType.coopmatNV = type.isCoopMatNV();
7595
memberType.coopmatKHR = type.isCoopMatKHR();
7596
memberType.arraySizes = nullptr;
7597
memberType.userDef = nullptr;
7598
memberType.loc = loc;
7599
memberType.typeParameters = (type.getTypeParameters() ? &typeParameters : nullptr);
7600
memberType.spirvType = nullptr;
7601
7602
memberType.qualifier.storage = publicType.qualifier.storage;
7603
memberType.shaderQualifiers = publicType.shaderQualifiers;
7604
7605
TString& structMemberName = *NewPoolTString(path.c_str()); // A copy is required due to declareVariable() signature.
7606
declareVariable(loc, structMemberName, memberType, nullptr, nullptr);
7607
});
7608
}
7609
7610
void TParseContext::vkRelaxedRemapFunctionParameter(TFunction* function, TParameter& param, std::vector<int>* newParams)
7611
{
7612
function->addParameter(param);
7613
7614
if (!param.type->isStruct() || !param.type->containsOpaque())
7615
return;
7616
7617
ForEachOpaque(*param.type, (param.name ? *param.name : param.type->getFieldName()),
7618
[function, param, newParams](const TType& type, const TString& path) {
7619
TString* memberName = NewPoolTString(path.c_str());
7620
7621
TType* memberType = new TType();
7622
memberType->shallowCopy(type);
7623
memberType->getQualifier().storage = param.type->getQualifier().storage;
7624
memberType->clearArraySizes();
7625
7626
TParameter memberParam = {};
7627
memberParam.name = memberName;
7628
memberParam.type = memberType;
7629
memberParam.defaultValue = nullptr;
7630
function->addParameter(memberParam);
7631
if (newParams)
7632
newParams->push_back(function->getParamCount()-1);
7633
});
7634
}
7635
7636
//
7637
// Generates a valid GLSL dereferencing string for the input TIntermNode
7638
//
7639
struct AccessChainTraverser : public TIntermTraverser {
7640
AccessChainTraverser() : TIntermTraverser(false, false, true)
7641
{}
7642
7643
TString path = "";
7644
TStorageQualifier topLevelStorageQualifier = TStorageQualifier::EvqLast;
7645
7646
bool visitBinary(TVisit, TIntermBinary* binary) override {
7647
if (binary->getOp() == EOpIndexDirectStruct)
7648
{
7649
const TTypeList& members = *binary->getLeft()->getType().getStruct();
7650
const TTypeLoc& member =
7651
members[binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()];
7652
TString memberName = member.type->getFieldName();
7653
7654
if (path != "")
7655
path.append(".");
7656
7657
path.append(memberName);
7658
}
7659
7660
if (binary->getOp() == EOpIndexDirect)
7661
{
7662
const TConstUnionArray& indices = binary->getRight()->getAsConstantUnion()->getConstArray();
7663
for (int index = 0; index < indices.size(); ++index)
7664
{
7665
path.append("[");
7666
path.append(String(indices[index].getIConst()));
7667
path.append("]");
7668
}
7669
}
7670
7671
return true;
7672
}
7673
7674
void visitSymbol(TIntermSymbol* symbol) override {
7675
if (symbol->getType().isOpaque())
7676
topLevelStorageQualifier = symbol->getQualifier().storage;
7677
if (!IsAnonymous(symbol->getName()))
7678
path.append(symbol->getName());
7679
}
7680
};
7681
7682
TIntermNode* TParseContext::vkRelaxedRemapFunctionArgument(const TSourceLoc& loc, TFunction* function, TIntermTyped* intermTyped)
7683
{
7684
AccessChainTraverser accessChainTraverser{};
7685
intermTyped->traverse(&accessChainTraverser);
7686
7687
if (accessChainTraverser.topLevelStorageQualifier == TStorageQualifier::EvqUniform)
7688
{
7689
TParameter param = { 0, new TType, {} };
7690
param.type->shallowCopy(intermTyped->getType());
7691
7692
function->addParameter(param);
7693
return intermTyped;
7694
}
7695
7696
TParameter param = { NewPoolTString(accessChainTraverser.path.c_str()), new TType, {} };
7697
param.type->shallowCopy(intermTyped->getType());
7698
7699
std::vector<int> newParams = {};
7700
vkRelaxedRemapFunctionParameter(function, param, &newParams);
7701
7702
if (intermTyped->getType().isOpaque())
7703
{
7704
TIntermNode* remappedArgument = intermTyped;
7705
{
7706
TIntermSymbol* intermSymbol = nullptr;
7707
TSymbol* symbol = symbolTable.find(*param.name);
7708
if (symbol && symbol->getAsVariable())
7709
intermSymbol = intermediate.addSymbol(*symbol->getAsVariable(), loc);
7710
else
7711
{
7712
TVariable* variable = new TVariable(param.name, *param.type);
7713
intermSymbol = intermediate.addSymbol(*variable, loc);
7714
}
7715
7716
remappedArgument = intermSymbol;
7717
}
7718
7719
return remappedArgument;
7720
}
7721
else if (!(intermTyped->isStruct() && intermTyped->getType().containsOpaque()))
7722
return intermTyped;
7723
else
7724
{
7725
TIntermNode* remappedArgument = intermTyped;
7726
{
7727
TSymbol* symbol = symbolTable.find(*param.name);
7728
if (symbol && symbol->getAsVariable())
7729
remappedArgument = intermediate.addSymbol(*symbol->getAsVariable(), loc);
7730
}
7731
7732
if (!newParams.empty())
7733
remappedArgument = intermediate.makeAggregate(remappedArgument, loc);
7734
7735
for (int paramIndex : newParams)
7736
{
7737
TParameter& newParam = function->operator[](paramIndex);
7738
TIntermSymbol* intermSymbol = nullptr;
7739
TSymbol* symbol = symbolTable.find(*newParam.name);
7740
if (symbol && symbol->getAsVariable())
7741
intermSymbol = intermediate.addSymbol(*symbol->getAsVariable(), loc);
7742
else
7743
{
7744
TVariable* variable = new TVariable(newParam.name, *newParam.type);
7745
intermSymbol = intermediate.addSymbol(*variable, loc);
7746
}
7747
7748
remappedArgument = intermediate.growAggregate(remappedArgument, intermSymbol);
7749
}
7750
7751
return remappedArgument;
7752
}
7753
}
7754
7755
TIntermTyped* TParseContext::vkRelaxedRemapDotDereference(const TSourceLoc&, TIntermTyped& base, const TType& member,
7756
const TString& identifier)
7757
{
7758
if (!member.isOpaque())
7759
return &base;
7760
7761
AccessChainTraverser traverser{};
7762
base.traverse(&traverser);
7763
if (!traverser.path.empty())
7764
traverser.path.append(".");
7765
traverser.path.append(identifier);
7766
7767
const TSymbol* symbol = symbolTable.find(traverser.path);
7768
if (!symbol)
7769
return &base;
7770
7771
TIntermTyped* result = intermediate.addSymbol(*symbol->getAsVariable());
7772
result->setType(symbol->getType());
7773
return result;
7774
}
7775
7776
//
7777
// Do everything necessary to handle a variable (non-block) declaration.
7778
// Either redeclaring a variable, or making a new one, updating the symbol
7779
// table, and all error checking.
7780
//
7781
// Returns a subtree node that computes an initializer, if needed.
7782
// Returns nullptr if there is no code to execute for initialization.
7783
//
7784
// 'publicType' is the type part of the declaration (to the left)
7785
// 'arraySizes' is the arrayness tagged on the identifier (to the right)
7786
//
7787
TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& identifier, const TPublicType& publicType,
7788
TArraySizes* arraySizes, TIntermTyped* initializer)
7789
{
7790
// Make a fresh type that combines the characteristics from the individual
7791
// identifier syntax and the declaration-type syntax.
7792
TType type(publicType);
7793
type.transferArraySizes(arraySizes);
7794
type.copyArrayInnerSizes(publicType.arraySizes);
7795
arrayOfArrayVersionCheck(loc, type.getArraySizes());
7796
7797
if (initializer) {
7798
if (type.getBasicType() == EbtRayQuery) {
7799
error(loc, "ray queries can only be initialized by using the rayQueryInitializeEXT intrinsic:", "=", identifier.c_str());
7800
} else if (type.getBasicType() == EbtHitObjectNV) {
7801
error(loc, "hit objects cannot be initialized using initializers", "=", identifier.c_str());
7802
}
7803
7804
}
7805
7806
if (type.isCoopMatKHR()) {
7807
intermediate.setUseVulkanMemoryModel();
7808
intermediate.setUseStorageBuffer();
7809
7810
if (!publicType.typeParameters || !publicType.typeParameters->arraySizes ||
7811
publicType.typeParameters->arraySizes->getNumDims() != 3) {
7812
error(loc, "unexpected number type parameters", identifier.c_str(), "");
7813
}
7814
if (publicType.typeParameters) {
7815
if (!isTypeFloat(publicType.typeParameters->basicType) &&
7816
!isTypeInt(publicType.typeParameters->basicType) && publicType.typeParameters->basicType != EbtSpirvType) {
7817
error(loc, "expected 8, 16, 32, or 64 bit signed or unsigned integer or 16, 32, or 64 bit float type", identifier.c_str(), "");
7818
}
7819
}
7820
}
7821
else if (type.isCoopMatNV()) {
7822
intermediate.setUseVulkanMemoryModel();
7823
intermediate.setUseStorageBuffer();
7824
7825
if (!publicType.typeParameters || publicType.typeParameters->arraySizes->getNumDims() != 4) {
7826
error(loc, "expected four type parameters", identifier.c_str(), "");
7827
}
7828
if (publicType.typeParameters) {
7829
if (isTypeFloat(publicType.basicType) &&
7830
publicType.typeParameters->arraySizes->getDimSize(0) != 16 &&
7831
publicType.typeParameters->arraySizes->getDimSize(0) != 32 &&
7832
publicType.typeParameters->arraySizes->getDimSize(0) != 64) {
7833
error(loc, "expected 16, 32, or 64 bits for first type parameter", identifier.c_str(), "");
7834
}
7835
if (isTypeInt(publicType.basicType) &&
7836
publicType.typeParameters->arraySizes->getDimSize(0) != 8 &&
7837
publicType.typeParameters->arraySizes->getDimSize(0) != 16 &&
7838
publicType.typeParameters->arraySizes->getDimSize(0) != 32) {
7839
error(loc, "expected 8, 16, or 32 bits for first type parameter", identifier.c_str(), "");
7840
}
7841
}
7842
} else {
7843
if (publicType.typeParameters && publicType.typeParameters->arraySizes->getNumDims() != 0) {
7844
error(loc, "unexpected type parameters", identifier.c_str(), "");
7845
}
7846
}
7847
7848
if (voidErrorCheck(loc, identifier, type.getBasicType()))
7849
return nullptr;
7850
7851
if (initializer)
7852
rValueErrorCheck(loc, "initializer", initializer);
7853
else
7854
nonInitConstCheck(loc, identifier, type);
7855
7856
samplerCheck(loc, type, identifier, initializer);
7857
transparentOpaqueCheck(loc, type, identifier);
7858
atomicUintCheck(loc, type, identifier);
7859
accStructCheck(loc, type, identifier);
7860
checkAndResizeMeshViewDim(loc, type, /*isBlockMember*/ false);
7861
if (type.getQualifier().storage == EvqConst && type.containsReference()) {
7862
error(loc, "variables with reference type can't have qualifier 'const'", "qualifier", "");
7863
}
7864
7865
if (type.getQualifier().storage != EvqUniform && type.getQualifier().storage != EvqBuffer) {
7866
if (type.contains16BitFloat())
7867
requireFloat16Arithmetic(loc, "qualifier", "float16 types can only be in uniform block or buffer storage");
7868
if (type.contains16BitInt())
7869
requireInt16Arithmetic(loc, "qualifier", "(u)int16 types can only be in uniform block or buffer storage");
7870
if (type.contains8BitInt())
7871
requireInt8Arithmetic(loc, "qualifier", "(u)int8 types can only be in uniform block or buffer storage");
7872
}
7873
7874
if (type.getQualifier().storage == EvqtaskPayloadSharedEXT)
7875
intermediate.addTaskPayloadEXTCount();
7876
if (type.getQualifier().storage == EvqShared && type.containsCoopMat())
7877
error(loc, "qualifier", "Cooperative matrix types must not be used in shared memory", "");
7878
7879
if (profile == EEsProfile) {
7880
if (type.getQualifier().isPipeInput() && type.getBasicType() == EbtStruct) {
7881
if (type.getQualifier().isArrayedIo(language)) {
7882
TType perVertexType(type, 0);
7883
if (perVertexType.containsArray() && perVertexType.containsBuiltIn() == false) {
7884
error(loc, "A per vertex structure containing an array is not allowed as input in ES", type.getTypeName().c_str(), "");
7885
}
7886
}
7887
else if (type.containsArray() && type.containsBuiltIn() == false) {
7888
error(loc, "A structure containing an array is not allowed as input in ES", type.getTypeName().c_str(), "");
7889
}
7890
if (type.containsStructure())
7891
error(loc, "A structure containing an struct is not allowed as input in ES", type.getTypeName().c_str(), "");
7892
}
7893
}
7894
7895
if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger))
7896
error(loc, "can only apply origin_upper_left and pixel_center_origin to gl_FragCoord", "layout qualifier", "");
7897
if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.getDepth() != EldNone)
7898
error(loc, "can only apply depth layout to gl_FragDepth", "layout qualifier", "");
7899
if (identifier != "gl_FragStencilRefARB" && publicType.shaderQualifiers.getStencil() != ElsNone)
7900
error(loc, "can only apply depth layout to gl_FragStencilRefARB", "layout qualifier", "");
7901
7902
// Check for redeclaration of built-ins and/or attempting to declare a reserved name
7903
TSymbol* symbol = redeclareBuiltinVariable(loc, identifier, type.getQualifier(), publicType.shaderQualifiers);
7904
if (symbol == nullptr)
7905
reservedErrorCheck(loc, identifier);
7906
7907
if (symbol == nullptr && spvVersion.vulkan > 0 && spvVersion.vulkanRelaxed) {
7908
bool remapped = vkRelaxedRemapUniformVariable(loc, identifier, publicType, arraySizes, initializer, type);
7909
7910
if (remapped) {
7911
return nullptr;
7912
}
7913
}
7914
7915
inheritGlobalDefaults(type.getQualifier());
7916
7917
// Declare the variable
7918
if (type.isArray()) {
7919
// Check that implicit sizing is only where allowed.
7920
arraySizesCheck(loc, type.getQualifier(), type.getArraySizes(), initializer, false);
7921
7922
if (! arrayQualifierError(loc, type.getQualifier()) && ! arrayError(loc, type))
7923
declareArray(loc, identifier, type, symbol);
7924
7925
if (initializer) {
7926
profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "initializer");
7927
profileRequires(loc, EEsProfile, 300, nullptr, "initializer");
7928
}
7929
} else {
7930
// non-array case
7931
if (symbol == nullptr)
7932
symbol = declareNonArray(loc, identifier, type);
7933
else if (type != symbol->getType())
7934
error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str());
7935
}
7936
7937
if (symbol == nullptr)
7938
return nullptr;
7939
7940
// Deal with initializer
7941
TIntermNode* initNode = nullptr;
7942
if (symbol != nullptr && initializer) {
7943
TVariable* variable = symbol->getAsVariable();
7944
if (! variable) {
7945
error(loc, "initializer requires a variable, not a member", identifier.c_str(), "");
7946
return nullptr;
7947
}
7948
initNode = executeInitializer(loc, initializer, variable);
7949
}
7950
7951
// look for errors in layout qualifier use
7952
layoutObjectCheck(loc, *symbol);
7953
7954
// fix up
7955
fixOffset(loc, *symbol);
7956
7957
return initNode;
7958
}
7959
7960
// Copy the relevant global output defaults into dst.
7961
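//
// Editor's sketch (hypothetical GLSL, geometry shader): after a global default
// such as
//
//     layout(stream = 1) out;
//
// an output declared without an explicit stream (or xfb_buffer) picks up the
// global default here.
//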
void TParseContext::inheritGlobalDefaults(TQualifier& dst) const
7962
{
7963
if (dst.storage == EvqVaryingOut) {
7964
if (! dst.hasStream() && language == EShLangGeometry)
7965
dst.layoutStream = globalOutputDefaults.layoutStream;
7966
if (! dst.hasXfbBuffer())
7967
dst.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
7968
}
7969
}
7970
7971
//
7972
// Make an internal-only variable whose name is for debug purposes only
7973
// and won't be searched for. Callers will only use the return value to use
7974
// the variable, not the name to look it up. It is okay if the name
7975
// is the same as other names; there won't be any conflict.
7976
//
7977
TVariable* TParseContext::makeInternalVariable(const char* name, const TType& type) const
7978
{
7979
TString* nameString = NewPoolTString(name);
7980
TVariable* variable = new TVariable(nameString, type);
7981
symbolTable.makeInternalVariable(*variable);
7982
7983
return variable;
7984
}
7985
7986
//
7987
// Declare a non-array variable, the main point being there is no redeclaration
7988
// for resizing allowed.
7989
//
7990
// Return the successfully declared variable.
7991
//
7992
TVariable* TParseContext::declareNonArray(const TSourceLoc& loc, const TString& identifier, const TType& type)
7993
{
7994
// make a new variable
7995
TVariable* variable = new TVariable(&identifier, type);
7996
7997
ioArrayCheck(loc, type, identifier);
7998
7999
// add variable to symbol table
8000
if (symbolTable.insert(*variable)) {
8001
if (symbolTable.atGlobalLevel())
8002
trackLinkage(*variable);
8003
return variable;
8004
}
8005
8006
error(loc, "redefinition", variable->getName().c_str(), "");
8007
return nullptr;
8008
}
8009
8010
//
8011
// Handle all types of initializers from the grammar.
8012
//
8013
// Returning nullptr just means there is no code to execute to handle the
8014
// initializer, which will, for example, be the case for constant initializers.
8015
//
8016
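//
// Editor's sketch of initializers handled below (hypothetical GLSL):
//
//     shared uint counter = {};          // null initializer (GL_EXT_null_initializer)
//     const vec2 kDir = vec2(0.0, 1.0);  // folded to a front-end constant
//     vec3 n = normalize(inNormal);      // non-const: becomes an EOpAssign subtree
//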
TIntermNode* TParseContext::executeInitializer(const TSourceLoc& loc, TIntermTyped* initializer, TVariable* variable)
8017
{
8018
// A null initializer is an aggregate that hasn't had an op assigned yet
8019
// (still EOpNull, no relation to nullInit), and has no children.
8020
bool nullInit = initializer->getAsAggregate() && initializer->getAsAggregate()->getOp() == EOpNull &&
8021
initializer->getAsAggregate()->getSequence().size() == 0;
8022
8023
//
8024
// Identifier must be of type constant, a global, or a temporary, and
8025
// starting at version 120, desktop allows uniforms to have initializers.
8026
//
8027
TStorageQualifier qualifier = variable->getType().getQualifier().storage;
8028
if (! (qualifier == EvqTemporary || qualifier == EvqGlobal || qualifier == EvqConst ||
8029
(qualifier == EvqUniform && !isEsProfile() && version >= 120))) {
8030
if (qualifier == EvqShared) {
8031
// GL_EXT_null_initializer allows this for shared, if it's a null initializer
8032
if (nullInit) {
8033
const char* feature = "initialization with shared qualifier";
8034
profileRequires(loc, EEsProfile, 0, E_GL_EXT_null_initializer, feature);
8035
profileRequires(loc, ~EEsProfile, 0, E_GL_EXT_null_initializer, feature);
8036
} else {
8037
error(loc, "initializer can only be a null initializer ('{}')", "shared", "");
8038
}
8039
} else {
8040
error(loc, " cannot initialize this type of qualifier ",
8041
variable->getType().getStorageQualifierString(), "");
8042
return nullptr;
8043
}
8044
}
8045
8046
if (nullInit) {
8047
// only some types can be null initialized
8048
if (variable->getType().containsUnsizedArray()) {
8049
error(loc, "null initializers can't size unsized arrays", "{}", "");
8050
return nullptr;
8051
}
8052
if (variable->getType().containsOpaque()) {
8053
error(loc, "null initializers can't be used on opaque values", "{}", "");
8054
return nullptr;
8055
}
8056
variable->getWritableType().getQualifier().setNullInit();
8057
return nullptr;
8058
}
8059
8060
arrayObjectCheck(loc, variable->getType(), "array initializer");
8061
8062
//
8063
// If the initializer was from braces { ... }, we convert the whole subtree to a
8064
// constructor-style subtree, allowing the rest of the code to operate
8065
// identically for both kinds of initializers.
8066
//
8067
// Type can't be deduced from the initializer list, so a skeletal type to
8068
// follow has to be passed in. Constness and specialization-constness
8069
// should be deduced bottom up, not dictated by the skeletal type.
8070
//
8071
TType skeletalType;
8072
skeletalType.shallowCopy(variable->getType());
8073
skeletalType.getQualifier().makeTemporary();
8074
initializer = convertInitializerList(loc, skeletalType, initializer);
8075
if (! initializer) {
8076
// error recovery; don't leave const without constant values
8077
if (qualifier == EvqConst)
8078
variable->getWritableType().getQualifier().makeTemporary();
8079
return nullptr;
8080
}
8081
8082
// Fix outer arrayness if variable is unsized, getting size from the initializer
8083
if (initializer->getType().isSizedArray() && variable->getType().isUnsizedArray())
8084
variable->getWritableType().changeOuterArraySize(initializer->getType().getOuterArraySize());
8085
8086
// Inner arrayness can also get set by an initializer
8087
if (initializer->getType().isArrayOfArrays() && variable->getType().isArrayOfArrays() &&
8088
initializer->getType().getArraySizes()->getNumDims() ==
8089
variable->getType().getArraySizes()->getNumDims()) {
8090
// adopt unsized sizes from the initializer's sizes
8091
for (int d = 1; d < variable->getType().getArraySizes()->getNumDims(); ++d) {
8092
if (variable->getType().getArraySizes()->getDimSize(d) == UnsizedArraySize) {
8093
variable->getWritableType().getArraySizes()->setDimSize(d,
8094
initializer->getType().getArraySizes()->getDimSize(d));
8095
}
8096
}
8097
}
8098
8099
// Uniforms require a compile-time constant initializer
8100
if (qualifier == EvqUniform && ! initializer->getType().getQualifier().isFrontEndConstant()) {
8101
error(loc, "uniform initializers must be constant", "=", "'%s'",
8102
variable->getType().getCompleteString(intermediate.getEnhancedMsgs()).c_str());
8103
variable->getWritableType().getQualifier().makeTemporary();
8104
return nullptr;
8105
}
8106
// Global consts require a constant initializer (specialization constant is okay)
8107
if (qualifier == EvqConst && symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) {
8108
error(loc, "global const initializers must be constant", "=", "'%s'",
8109
variable->getType().getCompleteString(intermediate.getEnhancedMsgs()).c_str());
8110
variable->getWritableType().getQualifier().makeTemporary();
8111
return nullptr;
8112
}
8113
8114
// Const variables require a constant initializer, depending on version
8115
if (qualifier == EvqConst) {
8116
if (! initializer->getType().getQualifier().isConstant()) {
8117
const char* initFeature = "non-constant initializer";
8118
requireProfile(loc, ~EEsProfile, initFeature);
8119
profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
8120
variable->getWritableType().getQualifier().storage = EvqConstReadOnly;
8121
qualifier = EvqConstReadOnly;
8122
}
8123
} else {
8124
// Non-const global variables in ES need a const initializer.
8125
//
8126
// "In declarations of global variables with no storage qualifier or with a const
8127
// qualifier any initializer must be a constant expression."
8128
if (symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) {
8129
const char* initFeature =
8130
"non-constant global initializer (needs GL_EXT_shader_non_constant_global_initializers)";
8131
if (isEsProfile()) {
8132
if (relaxedErrors() && ! extensionTurnedOn(E_GL_EXT_shader_non_constant_global_initializers))
8133
warn(loc, "not allowed in this version", initFeature, "");
8134
else
8135
profileRequires(loc, EEsProfile, 0, E_GL_EXT_shader_non_constant_global_initializers, initFeature);
8136
}
8137
}
8138
}
8139
8140
if (qualifier == EvqConst || qualifier == EvqUniform) {
8141
// Compile-time tagging of the variable with its constant value...
8142
8143
initializer = intermediate.addConversion(EOpAssign, variable->getType(), initializer);
8144
if (! initializer || ! initializer->getType().getQualifier().isConstant() ||
8145
variable->getType() != initializer->getType()) {
8146
error(loc, "non-matching or non-convertible constant type for const initializer",
8147
variable->getType().getStorageQualifierString(), "");
8148
variable->getWritableType().getQualifier().makeTemporary();
8149
return nullptr;
8150
}
8151
8152
// We either have a folded constant in getAsConstantUnion, or we have to use
8153
// the initializer's subtree in the AST to represent the computation of a
8154
// specialization constant.
8155
assert(initializer->getAsConstantUnion() || initializer->getType().getQualifier().isSpecConstant());
8156
if (initializer->getAsConstantUnion())
8157
variable->setConstArray(initializer->getAsConstantUnion()->getConstArray());
8158
else {
8159
// It's a specialization constant.
8160
variable->getWritableType().getQualifier().makeSpecConstant();
8161
8162
// Keep the subtree that computes the specialization constant with the variable.
8163
// Later, a symbol node will adopt the subtree from the variable.
8164
variable->setConstSubtree(initializer);
8165
}
8166
} else {
8167
// normal assigning of a value to a variable...
8168
specializationCheck(loc, initializer->getType(), "initializer");
8169
TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc);
8170
TIntermTyped* initNode = intermediate.addAssign(EOpAssign, intermSymbol, initializer, loc);
8171
if (! initNode)
8172
assignError(loc, "=", intermSymbol->getCompleteString(intermediate.getEnhancedMsgs()), initializer->getCompleteString(intermediate.getEnhancedMsgs()));
8173
8174
return initNode;
8175
}
8176
8177
return nullptr;
8178
}
8179
8180
//
8181
// Reprocess any initializer-list (the "{ ... }" syntax) parts of the
8182
// initializer.
8183
//
8184
// Need to hierarchically assign correct types and implicit
8185
// conversions. Will do this mimicking the same process used for
8186
// creating a constructor-style initializer, ensuring we get the
8187
// same form. However, it has to walk the 'type' passed in, in
8188
// parallel, as the type cannot be deduced from an initializer list.
8189
//
8190
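//
// Editor's sketch (hypothetical GLSL): an initializer list such as
//
//     vec2 a[] = { { 1.0, 2.0 }, vec2(3.0, 4.0) };
//
// is rewritten bottom-up into constructor form, roughly
// vec2[2](vec2(1.0, 2.0), vec2(3.0, 4.0)), with the unsized outer dimension
// taken from the number of list elements.
//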
TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const TType& type, TIntermTyped* initializer)
8191
{
8192
// Will operate recursively. Once a subtree is found that is constructor style,
8193
// everything below it is already good: Only the "top part" of the initializer
8194
// can be an initializer list, where "top part" can extend for several (or all) levels.
8195
8196
// see if we have bottomed out in the tree within the initializer-list part
8197
TIntermAggregate* initList = initializer->getAsAggregate();
8198
if (! initList || initList->getOp() != EOpNull)
8199
return initializer;
8200
8201
// Of the initializer-list set of nodes, need to process bottom up,
8202
// so recurse deep, then process on the way up.
8203
8204
// Go down the tree here...
8205
if (type.isArray()) {
8206
// The type's array might be unsized, which could be okay, so base sizes on the size of the aggregate.
8207
// Later on, initializer execution code will deal with array size logic.
8208
TType arrayType;
8209
arrayType.shallowCopy(type); // sharing struct stuff is fine
8210
arrayType.copyArraySizes(*type.getArraySizes()); // but get a fresh copy of the array information, to edit below
8211
8212
// edit array sizes to fill in unsized dimensions
8213
arrayType.changeOuterArraySize((int)initList->getSequence().size());
8214
TIntermTyped* firstInit = initList->getSequence()[0]->getAsTyped();
8215
if (arrayType.isArrayOfArrays() && firstInit->getType().isArray() &&
8216
arrayType.getArraySizes()->getNumDims() == firstInit->getType().getArraySizes()->getNumDims() + 1) {
8217
for (int d = 1; d < arrayType.getArraySizes()->getNumDims(); ++d) {
8218
if (arrayType.getArraySizes()->getDimSize(d) == UnsizedArraySize)
8219
arrayType.getArraySizes()->setDimSize(d, firstInit->getType().getArraySizes()->getDimSize(d - 1));
8220
}
8221
}
8222
8223
TType elementType(arrayType, 0); // dereferenced type
8224
for (size_t i = 0; i < initList->getSequence().size(); ++i) {
8225
initList->getSequence()[i] = convertInitializerList(loc, elementType, initList->getSequence()[i]->getAsTyped());
8226
if (initList->getSequence()[i] == nullptr)
8227
return nullptr;
8228
}
8229
8230
return addConstructor(loc, initList, arrayType);
8231
} else if (type.isStruct()) {
8232
if (type.getStruct()->size() != initList->getSequence().size()) {
8233
error(loc, "wrong number of structure members", "initializer list", "");
8234
return nullptr;
8235
}
8236
for (size_t i = 0; i < type.getStruct()->size(); ++i) {
8237
initList->getSequence()[i] = convertInitializerList(loc, *(*type.getStruct())[i].type, initList->getSequence()[i]->getAsTyped());
8238
if (initList->getSequence()[i] == nullptr)
8239
return nullptr;
8240
}
8241
} else if (type.isMatrix()) {
8242
if (type.getMatrixCols() != (int)initList->getSequence().size()) {
8243
error(loc, "wrong number of matrix columns:", "initializer list", type.getCompleteString(intermediate.getEnhancedMsgs()).c_str());
8244
return nullptr;
8245
}
8246
TType vectorType(type, 0); // dereferenced type
8247
for (int i = 0; i < type.getMatrixCols(); ++i) {
8248
initList->getSequence()[i] = convertInitializerList(loc, vectorType, initList->getSequence()[i]->getAsTyped());
8249
if (initList->getSequence()[i] == nullptr)
8250
return nullptr;
8251
}
8252
} else if (type.isVector()) {
8253
if (type.getVectorSize() != (int)initList->getSequence().size()) {
8254
error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString(intermediate.getEnhancedMsgs()).c_str());
8255
return nullptr;
8256
}
8257
TBasicType destType = type.getBasicType();
8258
for (int i = 0; i < type.getVectorSize(); ++i) {
8259
TBasicType initType = initList->getSequence()[i]->getAsTyped()->getBasicType();
8260
if (destType != initType && !intermediate.canImplicitlyPromote(initType, destType)) {
8261
error(loc, "type mismatch in initializer list", "initializer list", type.getCompleteString(intermediate.getEnhancedMsgs()).c_str());
8262
return nullptr;
8263
}
8264
8265
}
8266
} else {
8267
error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString(intermediate.getEnhancedMsgs()).c_str());
8268
return nullptr;
8269
}
8270
8271
// Now that the subtree is processed, process this node as if the
8272
// initializer list is a set of arguments to a constructor.
8273
TIntermNode* emulatedConstructorArguments;
8274
if (initList->getSequence().size() == 1)
8275
emulatedConstructorArguments = initList->getSequence()[0];
8276
else
8277
emulatedConstructorArguments = initList;
8278
return addConstructor(loc, emulatedConstructorArguments, type);
8279
}
8280
8281
//
8282
// Test for the correctness of the parameters passed to various constructor functions
8283
// and also convert them to the right data type, if allowed and required.
8284
//
8285
// 'node' is what to construct from.
8286
// 'type' is what type to construct.
8287
//
8288
// Returns nullptr for an error or the constructed node (aggregate or typed) for no error.
8289
//
8290
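//
// Editor's sketch (hypothetical GLSL): for a call like
//
//     vec3 v = vec3(1, 2.0, someUint);
//
// each argument is checked and, where allowed, converted to the constructed
// type's basic type before the aggregate constructor node is built.
//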
TIntermTyped* TParseContext::addConstructor(const TSourceLoc& loc, TIntermNode* node, const TType& type)
8291
{
8292
if (node == nullptr || node->getAsTyped() == nullptr)
8293
return nullptr;
8294
rValueErrorCheck(loc, "constructor", node->getAsTyped());
8295
8296
TIntermAggregate* aggrNode = node->getAsAggregate();
8297
TOperator op = intermediate.mapTypeToConstructorOp(type);
8298
8299
// Combined texture-sampler constructors are completely semantic checked
8300
// in constructorTextureSamplerError()
8301
if (op == EOpConstructTextureSampler) {
8302
if (aggrNode != nullptr) {
8303
if (aggrNode->getSequence()[1]->getAsTyped()->getType().getSampler().shadow) {
8304
// Transfer depth into the texture (SPIR-V image) type, as a hint
8305
// for tools to know this texture/image is a depth image.
8306
aggrNode->getSequence()[0]->getAsTyped()->getWritableType().getSampler().shadow = true;
8307
}
8308
return intermediate.setAggregateOperator(aggrNode, op, type, loc);
8309
}
8310
}
8311
8312
TTypeList::const_iterator memberTypes;
8313
if (op == EOpConstructStruct)
8314
memberTypes = type.getStruct()->begin();
8315
8316
TType elementType;
8317
if (type.isArray()) {
8318
TType dereferenced(type, 0);
8319
elementType.shallowCopy(dereferenced);
8320
} else
8321
elementType.shallowCopy(type);
8322
8323
bool singleArg;
8324
if (aggrNode) {
8325
if (aggrNode->getOp() != EOpNull)
8326
singleArg = true;
8327
else
8328
singleArg = false;
8329
} else
8330
singleArg = true;
8331
8332
TIntermTyped *newNode;
8333
if (singleArg) {
8334
// If a structure or array constructor is being called with only one
8334
// parameter, we only need to call the constructAggregate function once.
8336
if (type.isArray())
8337
newNode = constructAggregate(node, elementType, 1, node->getLoc());
8338
else if (op == EOpConstructStruct)
8339
newNode = constructAggregate(node, *(*memberTypes).type, 1, node->getLoc());
8340
else
8341
newNode = constructBuiltIn(type, op, node->getAsTyped(), node->getLoc(), false);
8342
8343
if (newNode && (type.isArray() || op == EOpConstructStruct))
8344
newNode = intermediate.setAggregateOperator(newNode, EOpConstructStruct, type, loc);
8345
8346
return newNode;
8347
}
8348
8349
//
8350
// Handle list of arguments.
8351
//
8352
TIntermSequence &sequenceVector = aggrNode->getSequence(); // Stores the information about the parameter to the constructor
8353
// if the structure constructor contains more than one parameter, then construct
8354
// each parameter
8355
8356
int paramCount = 0; // keeps track of the constructor parameter number being checked
8357
8358
// for each parameter to the constructor call, check to see if the right type is passed or convert them
8359
// to the right type if possible (and allowed).
8360
// for structure constructors, just check if the right type is passed, no conversion is allowed.
8361
for (TIntermSequence::iterator p = sequenceVector.begin();
8362
p != sequenceVector.end(); p++, paramCount++) {
8363
if (type.isArray())
8364
newNode = constructAggregate(*p, elementType, paramCount+1, node->getLoc());
8365
else if (op == EOpConstructStruct)
8366
newNode = constructAggregate(*p, *(memberTypes[paramCount]).type, paramCount+1, node->getLoc());
8367
else
8368
newNode = constructBuiltIn(type, op, (*p)->getAsTyped(), node->getLoc(), true);
8369
8370
if (newNode)
8371
*p = newNode;
8372
else
8373
return nullptr;
8374
}
8375
8376
TIntermTyped *ret_node = intermediate.setAggregateOperator(aggrNode, op, type, loc);
8377
8378
TIntermAggregate *agg_node = ret_node->getAsAggregate();
8379
if (agg_node && (agg_node->isVector() || agg_node->isArray() || agg_node->isMatrix()))
8380
agg_node->updatePrecision();
8381
8382
return ret_node;
8383
}
8384
8385
// Function for constructor implementation. Calls addUnaryMath with appropriate EOp value
8386
// for the parameter to the constructor (passed to this function). Essentially, it converts
8387
// the parameter types correctly. If a constructor expects an int (like ivec2) and is passed a
8388
// float, then float is converted to int.
8389
//
8390
// Returns nullptr for an error or the constructed node.
8391
//
8392
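//
// Editor's sketch (hypothetical GLSL): for ivec2(someVec2) the float operand is
// first converted with basicOp EOpConstructInt and then shaped; the special
// cases below cover things like building a uvec2 from a buffer reference
// (GL_EXT_buffer_reference_uvec2) or from a bindless sampler handle.
//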
TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, TIntermTyped* node, const TSourceLoc& loc,
8393
bool subset)
8394
{
8395
// If we are changing a matrix in both domain of basic type and to a non matrix,
8396
// do the shape change first (by default, below, basic type is changed before shape).
8397
// This avoids requesting a matrix of a new type that is going to be discarded anyway.
8398
// TODO: This could be generalized to more type combinations, but that would require
8399
// more extensive testing and full algorithm rework. For now, the need to do two changes makes
8400
// the recursive call work, and avoids the most egregious case of creating integer matrices.
8401
if (node->getType().isMatrix() && (type.isScalar() || type.isVector()) &&
8402
type.isFloatingDomain() != node->getType().isFloatingDomain()) {
8403
TType transitionType(node->getBasicType(), glslang::EvqTemporary, type.getVectorSize(), 0, 0, node->isVector());
8404
TOperator transitionOp = intermediate.mapTypeToConstructorOp(transitionType);
8405
node = constructBuiltIn(transitionType, transitionOp, node, loc, false);
8406
}
8407
8408
TIntermTyped* newNode;
8409
TOperator basicOp;
8410
8411
//
8412
// First, convert types as needed.
8413
//
8414
switch (op) {
8415
case EOpConstructVec2:
8416
case EOpConstructVec3:
8417
case EOpConstructVec4:
8418
case EOpConstructMat2x2:
8419
case EOpConstructMat2x3:
8420
case EOpConstructMat2x4:
8421
case EOpConstructMat3x2:
8422
case EOpConstructMat3x3:
8423
case EOpConstructMat3x4:
8424
case EOpConstructMat4x2:
8425
case EOpConstructMat4x3:
8426
case EOpConstructMat4x4:
8427
case EOpConstructFloat:
8428
basicOp = EOpConstructFloat;
8429
break;
8430
8431
case EOpConstructIVec2:
8432
case EOpConstructIVec3:
8433
case EOpConstructIVec4:
8434
case EOpConstructInt:
8435
basicOp = EOpConstructInt;
8436
break;
8437
8438
case EOpConstructUVec2:
8439
if (node->getType().getBasicType() == EbtReference) {
8440
requireExtensions(loc, 1, &E_GL_EXT_buffer_reference_uvec2, "reference conversion to uvec2");
8441
TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUvec2, true, node,
8442
type);
8443
return newNode;
8444
} else if (node->getType().getBasicType() == EbtSampler) {
8445
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "sampler conversion to uvec2");
8446
// force the basic type of the constructor param to uvec2, otherwise the SPIR-V
8447
// builder will report errors
8448
TIntermTyped* newSrcNode = intermediate.createConversion(EbtUint, node);
8449
newSrcNode->getAsTyped()->getWritableType().setVectorSize(2);
8450
8451
TIntermTyped* newNode =
8452
intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConstructUVec2, false, newSrcNode, type);
8453
return newNode;
8454
}
8455
[[fallthrough]];
8456
case EOpConstructUVec3:
8457
case EOpConstructUVec4:
8458
case EOpConstructUint:
8459
basicOp = EOpConstructUint;
8460
break;
8461
8462
case EOpConstructBVec2:
8463
case EOpConstructBVec3:
8464
case EOpConstructBVec4:
8465
case EOpConstructBool:
8466
basicOp = EOpConstructBool;
8467
break;
8468
case EOpConstructTextureSampler:
8469
if ((node->getType().getBasicType() == EbtUint || node->getType().getBasicType() == EbtInt) &&
8470
node->getType().getVectorSize() == 2) {
8471
requireExtensions(loc, 1, &E_GL_ARB_bindless_texture, "ivec2/uvec2 convert to texture handle");
8472
// Whether ivec2 or uvec2, set EOpPackUint2x32 just to generate an OpBitcast opcode
8473
TIntermTyped* newNode =
8474
intermediate.addBuiltInFunctionCall(node->getLoc(), EOpPackUint2x32, true, node, type);
8475
return newNode;
8476
}
8477
[[fallthrough]];
8478
case EOpConstructDVec2:
8479
case EOpConstructDVec3:
8480
case EOpConstructDVec4:
8481
case EOpConstructDMat2x2:
8482
case EOpConstructDMat2x3:
8483
case EOpConstructDMat2x4:
8484
case EOpConstructDMat3x2:
8485
case EOpConstructDMat3x3:
8486
case EOpConstructDMat3x4:
8487
case EOpConstructDMat4x2:
8488
case EOpConstructDMat4x3:
8489
case EOpConstructDMat4x4:
8490
case EOpConstructDouble:
8491
basicOp = EOpConstructDouble;
8492
break;
8493
8494
case EOpConstructF16Vec2:
8495
case EOpConstructF16Vec3:
8496
case EOpConstructF16Vec4:
8497
case EOpConstructF16Mat2x2:
8498
case EOpConstructF16Mat2x3:
8499
case EOpConstructF16Mat2x4:
8500
case EOpConstructF16Mat3x2:
8501
case EOpConstructF16Mat3x3:
8502
case EOpConstructF16Mat3x4:
8503
case EOpConstructF16Mat4x2:
8504
case EOpConstructF16Mat4x3:
8505
case EOpConstructF16Mat4x4:
8506
case EOpConstructFloat16:
8507
basicOp = EOpConstructFloat16;
8508
// 8/16-bit storage extensions don't support constructing composites of 8/16-bit types,
8509
// so construct a 32-bit type and convert
8510
if (!intermediate.getArithemeticFloat16Enabled()) {
8511
TType tempType(EbtFloat, EvqTemporary, type.getVectorSize());
8512
newNode = node;
8513
if (tempType != newNode->getType()) {
8514
TOperator aggregateOp;
8515
if (op == EOpConstructFloat16)
8516
aggregateOp = EOpConstructFloat;
8517
else
8518
aggregateOp = (TOperator)(EOpConstructVec2 + op - EOpConstructF16Vec2);
8519
newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc());
8520
}
8521
newNode = intermediate.addConversion(EbtFloat16, newNode);
8522
return newNode;
8523
}
8524
break;
8525
8526
case EOpConstructI8Vec2:
8527
case EOpConstructI8Vec3:
8528
case EOpConstructI8Vec4:
8529
case EOpConstructInt8:
8530
basicOp = EOpConstructInt8;
8531
// 8/16-bit storage extensions don't support constructing composites of 8/16-bit types,
8532
// so construct a 32-bit type and convert
8533
if (!intermediate.getArithemeticInt8Enabled()) {
8534
TType tempType(EbtInt, EvqTemporary, type.getVectorSize());
8535
newNode = node;
8536
if (tempType != newNode->getType()) {
8537
TOperator aggregateOp;
8538
if (op == EOpConstructInt8)
8539
aggregateOp = EOpConstructInt;
8540
else
8541
aggregateOp = (TOperator)(EOpConstructIVec2 + op - EOpConstructI8Vec2);
8542
newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc());
8543
}
8544
newNode = intermediate.addConversion(EbtInt8, newNode);
8545
return newNode;
8546
}
8547
break;
8548
8549
case EOpConstructU8Vec2:
8550
case EOpConstructU8Vec3:
8551
case EOpConstructU8Vec4:
8552
case EOpConstructUint8:
8553
basicOp = EOpConstructUint8;
8554
// 8/16-bit storage extensions don't support constructing composites of 8/16-bit types,
8555
// so construct a 32-bit type and convert
8556
if (!intermediate.getArithemeticInt8Enabled()) {
8557
TType tempType(EbtUint, EvqTemporary, type.getVectorSize());
8558
newNode = node;
8559
if (tempType != newNode->getType()) {
8560
TOperator aggregateOp;
8561
if (op == EOpConstructUint8)
8562
aggregateOp = EOpConstructUint;
8563
else
8564
aggregateOp = (TOperator)(EOpConstructUVec2 + op - EOpConstructU8Vec2);
8565
newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc());
8566
}
8567
newNode = intermediate.addConversion(EbtUint8, newNode);
8568
return newNode;
8569
}
8570
break;
8571
8572
case EOpConstructI16Vec2:
8573
case EOpConstructI16Vec3:
8574
case EOpConstructI16Vec4:
8575
case EOpConstructInt16:
8576
basicOp = EOpConstructInt16;
8577
// 8/16-bit storage extensions don't support constructing composites of 8/16-bit types,
8578
// so construct a 32-bit type and convert
8579
if (!intermediate.getArithemeticInt16Enabled()) {
8580
TType tempType(EbtInt, EvqTemporary, type.getVectorSize());
8581
newNode = node;
8582
if (tempType != newNode->getType()) {
8583
TOperator aggregateOp;
8584
if (op == EOpConstructInt16)
8585
aggregateOp = EOpConstructInt;
8586
else
8587
aggregateOp = (TOperator)(EOpConstructIVec2 + op - EOpConstructI16Vec2);
8588
newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc());
8589
}
8590
newNode = intermediate.addConversion(EbtInt16, newNode);
8591
return newNode;
8592
}
8593
break;
8594
8595
case EOpConstructU16Vec2:
8596
case EOpConstructU16Vec3:
8597
case EOpConstructU16Vec4:
8598
case EOpConstructUint16:
8599
basicOp = EOpConstructUint16;
8600
// 8/16-bit storage extensions don't support constructing composites of 8/16-bit types,
8601
// so construct a 32-bit type and convert
8602
if (!intermediate.getArithemeticInt16Enabled()) {
8603
TType tempType(EbtUint, EvqTemporary, type.getVectorSize());
8604
newNode = node;
8605
if (tempType != newNode->getType()) {
8606
TOperator aggregateOp;
8607
if (op == EOpConstructUint16)
8608
aggregateOp = EOpConstructUint;
8609
else
8610
aggregateOp = (TOperator)(EOpConstructUVec2 + op - EOpConstructU16Vec2);
8611
newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc());
8612
}
8613
newNode = intermediate.addConversion(EbtUint16, newNode);
8614
return newNode;
8615
}
8616
break;
8617
8618
case EOpConstructI64Vec2:
8619
case EOpConstructI64Vec3:
8620
case EOpConstructI64Vec4:
8621
case EOpConstructInt64:
8622
basicOp = EOpConstructInt64;
8623
break;
8624
8625
case EOpConstructUint64:
8626
if (type.isScalar() && node->getType().isReference()) {
8627
TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUint64, true, node, type);
8628
return newNode;
8629
}
8630
[[fallthrough]];
8631
case EOpConstructU64Vec2:
8632
case EOpConstructU64Vec3:
8633
case EOpConstructU64Vec4:
8634
basicOp = EOpConstructUint64;
8635
break;
8636
8637
case EOpConstructNonuniform:
8638
// Make a nonuniform copy of node
8639
newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpCopyObject, true, node, type);
8640
return newNode;
8641
8642
case EOpConstructReference:
8643
// construct reference from reference
8644
if (node->getType().isReference()) {
8645
newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConstructReference, true, node, type);
8646
return newNode;
8647
// construct reference from uint64
8648
} else if (node->getType().isScalar() && node->getType().getBasicType() == EbtUint64) {
8649
TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToPtr, true, node,
8650
type);
8651
return newNode;
8652
// construct reference from uvec2
8653
} else if (node->getType().isVector() && node->getType().getBasicType() == EbtUint &&
8654
node->getVectorSize() == 2) {
8655
requireExtensions(loc, 1, &E_GL_EXT_buffer_reference_uvec2, "uvec2 conversion to reference");
8656
TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUvec2ToPtr, true, node,
8657
type);
8658
return newNode;
8659
} else {
8660
return nullptr;
8661
}
8662
8663
case EOpConstructCooperativeMatrixNV:
8664
case EOpConstructCooperativeMatrixKHR:
8665
if (node->getType() == type) {
8666
return node;
8667
}
8668
if (!node->getType().isCoopMat()) {
8669
if (type.getBasicType() != node->getType().getBasicType()) {
8670
node = intermediate.addConversion(type.getBasicType(), node);
8671
if (node == nullptr)
8672
return nullptr;
8673
}
8674
node = intermediate.setAggregateOperator(node, op, type, node->getLoc());
8675
} else {
8676
TOperator op = EOpNull;
8677
switch (type.getBasicType()) {
8678
default:
8679
assert(0);
8680
break;
8681
case EbtInt:
8682
switch (node->getType().getBasicType()) {
8683
case EbtFloat: op = EOpConvFloatToInt; break;
8684
case EbtFloat16: op = EOpConvFloat16ToInt; break;
8685
case EbtUint8: op = EOpConvUint8ToInt; break;
8686
case EbtInt8: op = EOpConvInt8ToInt; break;
8687
case EbtUint16: op = EOpConvUint16ToInt; break;
8688
case EbtInt16: op = EOpConvInt16ToInt; break;
8689
case EbtUint: op = EOpConvUintToInt; break;
8690
default: assert(0);
8691
}
8692
break;
8693
case EbtUint:
8694
switch (node->getType().getBasicType()) {
8695
case EbtFloat: op = EOpConvFloatToUint; break;
8696
case EbtFloat16: op = EOpConvFloat16ToUint; break;
8697
case EbtUint8: op = EOpConvUint8ToUint; break;
8698
case EbtInt8: op = EOpConvInt8ToUint; break;
8699
case EbtUint16: op = EOpConvUint16ToUint; break;
8700
case EbtInt16: op = EOpConvInt16ToUint; break;
8701
case EbtInt: op = EOpConvIntToUint; break;
8702
default: assert(0);
8703
}
8704
break;
8705
case EbtInt16:
8706
switch (node->getType().getBasicType()) {
8707
case EbtFloat: op = EOpConvFloatToInt16; break;
8708
case EbtFloat16: op = EOpConvFloat16ToInt16; break;
8709
case EbtUint8: op = EOpConvUint8ToInt16; break;
8710
case EbtInt8: op = EOpConvInt8ToInt16; break;
8711
case EbtUint16: op = EOpConvUint16ToInt16; break;
8712
case EbtInt: op = EOpConvIntToInt16; break;
8713
case EbtUint: op = EOpConvUintToInt16; break;
8714
default: assert(0);
8715
}
8716
break;
8717
case EbtUint16:
8718
switch (node->getType().getBasicType()) {
8719
case EbtFloat: op = EOpConvFloatToUint16; break;
8720
case EbtFloat16: op = EOpConvFloat16ToUint16; break;
8721
case EbtUint8: op = EOpConvUint8ToUint16; break;
8722
case EbtInt8: op = EOpConvInt8ToUint16; break;
8723
case EbtInt16: op = EOpConvInt16ToUint16; break;
8724
case EbtInt: op = EOpConvIntToUint16; break;
8725
case EbtUint: op = EOpConvUintToUint16; break;
8726
default: assert(0);
8727
}
8728
break;
8729
case EbtInt8:
8730
switch (node->getType().getBasicType()) {
8731
case EbtFloat: op = EOpConvFloatToInt8; break;
8732
case EbtFloat16: op = EOpConvFloat16ToInt8; break;
8733
case EbtUint8: op = EOpConvUint8ToInt8; break;
8734
case EbtInt16: op = EOpConvInt16ToInt8; break;
8735
case EbtUint16: op = EOpConvUint16ToInt8; break;
8736
case EbtInt: op = EOpConvIntToInt8; break;
8737
case EbtUint: op = EOpConvUintToInt8; break;
8738
default: assert(0);
8739
}
8740
break;
8741
case EbtUint8:
8742
switch (node->getType().getBasicType()) {
8743
case EbtFloat: op = EOpConvFloatToUint8; break;
8744
case EbtFloat16: op = EOpConvFloat16ToUint8; break;
8745
case EbtInt8: op = EOpConvInt8ToUint8; break;
8746
case EbtInt16: op = EOpConvInt16ToUint8; break;
8747
case EbtUint16: op = EOpConvUint16ToUint8; break;
8748
case EbtInt: op = EOpConvIntToUint8; break;
8749
case EbtUint: op = EOpConvUintToUint8; break;
8750
default: assert(0);
8751
}
8752
break;
8753
case EbtFloat:
8754
switch (node->getType().getBasicType()) {
8755
case EbtFloat16: op = EOpConvFloat16ToFloat; break;
8756
case EbtInt8: op = EOpConvInt8ToFloat; break;
8757
case EbtUint8: op = EOpConvUint8ToFloat; break;
8758
case EbtInt16: op = EOpConvInt16ToFloat; break;
8759
case EbtUint16: op = EOpConvUint16ToFloat; break;
8760
case EbtInt: op = EOpConvIntToFloat; break;
8761
case EbtUint: op = EOpConvUintToFloat; break;
8762
default: assert(0);
8763
}
8764
break;
8765
case EbtFloat16:
8766
switch (node->getType().getBasicType()) {
8767
case EbtFloat: op = EOpConvFloatToFloat16; break;
8768
case EbtInt8: op = EOpConvInt8ToFloat16; break;
8769
case EbtUint8: op = EOpConvUint8ToFloat16; break;
8770
case EbtInt16: op = EOpConvInt16ToFloat16; break;
8771
case EbtUint16: op = EOpConvUint16ToFloat16; break;
8772
case EbtInt: op = EOpConvIntToFloat16; break;
8773
case EbtUint: op = EOpConvUintToFloat16; break;
8774
default: assert(0);
8775
}
8776
break;
8777
}
8778
8779
node = intermediate.addUnaryNode(op, node, node->getLoc(), type);
8780
// If it's a (non-specialization) constant, it must be folded.
8781
if (node->getAsUnaryNode()->getOperand()->getAsConstantUnion())
8782
return node->getAsUnaryNode()->getOperand()->getAsConstantUnion()->fold(op, node->getType());
8783
}
8784
8785
return node;
8786
8787
case EOpConstructAccStruct:
8788
if ((node->getType().isScalar() && node->getType().getBasicType() == EbtUint64)) {
8789
// construct acceleration structure from uint64
8790
requireExtensions(loc, Num_ray_tracing_EXTs, ray_tracing_EXTs, "uint64_t conversion to accelerationStructureEXT");
8791
return intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToAccStruct, true, node,
8792
type);
8793
} else if (node->getType().isVector() && node->getType().getBasicType() == EbtUint && node->getVectorSize() == 2) {
8794
// construct acceleration structure from uvec2
8795
requireExtensions(loc, Num_ray_tracing_EXTs, ray_tracing_EXTs, "uvec2 conversion to accelerationStructureEXT");
8796
return intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUvec2ToAccStruct, true, node,
8797
type);
8798
} else
8799
return nullptr;
8800
8801
default:
8802
error(loc, "unsupported construction", "", "");
8803
8804
return nullptr;
8805
}
8806
newNode = intermediate.addUnaryMath(basicOp, node, node->getLoc());
8807
if (newNode == nullptr) {
8808
error(loc, "can't convert", "constructor", "");
8809
return nullptr;
8810
}
8811
8812
//
8813
// Now, if there still isn't an operation to do the construction, and we need one, add one.
8814
//
8815
8816
// Otherwise, skip out early.
8817
if (subset || (newNode != node && newNode->getType() == type))
8818
return newNode;
8819
8820
// setAggregateOperator will insert a new node for the constructor, as needed.
8821
return intermediate.setAggregateOperator(newNode, op, type, loc);
8822
}
8823
8824
// This function tests for the type of the parameters to the structure or array constructor. Raises
8825
// an error message if the expected type does not match the parameter passed to the constructor.
8826
//
8827
// Returns nullptr for an error or the input node itself if the expected and the given parameter types match.
8828
//
8829
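//
// Editor's sketch (hypothetical GLSL): given
//
//     struct S { float f; int i; };
//     S s = S(1.0, true);   // bool does not implicitly convert to int
//
// the second parameter fails the conversion check and the error below is raised.
//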
TIntermTyped* TParseContext::constructAggregate(TIntermNode* node, const TType& type, int paramCount, const TSourceLoc& loc)
8830
{
8831
TIntermTyped* converted = intermediate.addConversion(EOpConstructStruct, type, node->getAsTyped());
8832
if (! converted || converted->getType() != type) {
8833
bool enhanced = intermediate.getEnhancedMsgs();
8834
error(loc, "", "constructor", "cannot convert parameter %d from '%s' to '%s'", paramCount,
8835
node->getAsTyped()->getType().getCompleteString(enhanced).c_str(), type.getCompleteString(enhanced).c_str());
8836
8837
return nullptr;
8838
}
8839
8840
return converted;
8841
}
8842
8843
// If a memory qualifier is present in 'from', also make it present in 'to'.
8844
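//
// Editor's sketch (hypothetical GLSL): in
//
//     layout(std430) readonly coherent buffer Data { float values[]; };
//
// the block-level readonly and coherent qualifiers are propagated by this
// helper onto members that did not state them explicitly.
//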
void TParseContext::inheritMemoryQualifiers(const TQualifier& from, TQualifier& to)
8845
{
8846
if (from.isReadOnly())
8847
to.readonly = from.readonly;
8848
if (from.isWriteOnly())
8849
to.writeonly = from.writeonly;
8850
if (from.coherent)
8851
to.coherent = from.coherent;
8852
if (from.volatil)
8853
to.volatil = from.volatil;
8854
if (from.restrict)
8855
to.restrict = from.restrict;
8856
}
8857
8858
//
8859
// Update qualifier layoutBindlessImage & layoutBindlessSampler on block member
8860
//
8861
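//
// Editor's sketch (hypothetical GLSL, requires GL_ARB_bindless_texture):
//
//     uniform Handles { sampler2D tex; layout(rgba8) image2D img; };
//
// sampler members get layoutBindlessSampler, image members get
// layoutBindlessImage, and nested structs are visited recursively.
//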
void TParseContext::updateBindlessQualifier(TType& memberType)
8862
{
8863
if (memberType.containsSampler()) {
8864
if (memberType.isStruct()) {
8865
TTypeList* typeList = memberType.getWritableStruct();
8866
for (unsigned int member = 0; member < typeList->size(); ++member) {
8867
TType* subMemberType = (*typeList)[member].type;
8868
updateBindlessQualifier(*subMemberType);
8869
}
8870
}
8871
else if (memberType.getSampler().isImage()) {
8872
intermediate.setBindlessImageMode(currentCaller, AstRefTypeLayout);
8873
memberType.getQualifier().layoutBindlessImage = true;
8874
}
8875
else {
8876
intermediate.setBindlessTextureMode(currentCaller, AstRefTypeLayout);
8877
memberType.getQualifier().layoutBindlessSampler = true;
8878
}
8879
}
8880
}
8881
8882
//
8883
// Do everything needed to add an interface block.
8884
//
8885
void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, const TString* instanceName,
8886
TArraySizes* arraySizes)
8887
{
8888
if (spvVersion.vulkan > 0 && spvVersion.vulkanRelaxed)
8889
blockStorageRemap(loc, blockName, currentBlockQualifier);
8890
blockStageIoCheck(loc, currentBlockQualifier);
8891
blockQualifierCheck(loc, currentBlockQualifier, instanceName != nullptr);
8892
if (arraySizes != nullptr) {
8893
arraySizesCheck(loc, currentBlockQualifier, arraySizes, nullptr, false);
8894
arrayOfArrayVersionCheck(loc, arraySizes);
8895
if (arraySizes->getNumDims() > 1)
8896
requireProfile(loc, ~EEsProfile, "array-of-array of block");
8897
}
8898
8899
// Inherit and check member storage qualifiers WRT to the block-level qualifier.
8900
for (unsigned int member = 0; member < typeList.size(); ++member) {
8901
TType& memberType = *typeList[member].type;
8902
TQualifier& memberQualifier = memberType.getQualifier();
8903
const TSourceLoc& memberLoc = typeList[member].loc;
8904
if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage)
8905
error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), "");
8906
memberQualifier.storage = currentBlockQualifier.storage;
8907
globalQualifierFixCheck(memberLoc, memberQualifier);
8908
inheritMemoryQualifiers(currentBlockQualifier, memberQualifier);
8909
if (currentBlockQualifier.perPrimitiveNV)
8910
memberQualifier.perPrimitiveNV = currentBlockQualifier.perPrimitiveNV;
8911
if (currentBlockQualifier.perViewNV)
8912
memberQualifier.perViewNV = currentBlockQualifier.perViewNV;
8913
if (currentBlockQualifier.perTaskNV)
8914
memberQualifier.perTaskNV = currentBlockQualifier.perTaskNV;
8915
if (currentBlockQualifier.storage == EvqtaskPayloadSharedEXT)
8916
memberQualifier.storage = EvqtaskPayloadSharedEXT;
8917
if (memberQualifier.storage == EvqSpirvStorageClass)
8918
error(memberLoc, "member cannot have a spirv_storage_class qualifier", memberType.getFieldName().c_str(), "");
8919
if (memberQualifier.hasSpirvDecorate() && !memberQualifier.getSpirvDecorate().decorateIds.empty())
8920
error(memberLoc, "member cannot have a spirv_decorate_id qualifier", memberType.getFieldName().c_str(), "");
8921
if ((currentBlockQualifier.storage == EvqUniform || currentBlockQualifier.storage == EvqBuffer) && (memberQualifier.isInterpolation() || memberQualifier.isAuxiliary()))
8922
error(memberLoc, "member of uniform or buffer block cannot have an auxiliary or interpolation qualifier", memberType.getFieldName().c_str(), "");
8923
if (memberType.isArray())
8924
arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1);
8925
if (memberQualifier.hasOffset()) {
8926
if (spvVersion.spv == 0) {
8927
profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "\"offset\" on block member");
8928
profileRequires(memberLoc, EEsProfile, 300, E_GL_ARB_enhanced_layouts, "\"offset\" on block member");
8929
}
8930
}
8931
8932
// For bindless texture, a sampler can be declared as a uniform/storage block member.
8933
if (memberType.containsOpaque()) {
8934
if (memberType.containsSampler() && extensionTurnedOn(E_GL_ARB_bindless_texture))
8935
updateBindlessQualifier(memberType);
8936
else
8937
error(memberLoc, "member of block cannot be or contain a sampler, image, or atomic_uint type", typeList[member].type->getFieldName().c_str(), "");
8938
}
8939
8940
if (memberType.containsCoopMat())
8941
error(memberLoc, "member of block cannot be or contain a cooperative matrix type", typeList[member].type->getFieldName().c_str(), "");
8942
}
8943
8944
// This might be a redeclaration of a built-in block. If so, redeclareBuiltinBlock() will
8945
// do all the rest.
8946
if (! symbolTable.atBuiltInLevel() && builtInName(*blockName)) {
8947
redeclareBuiltinBlock(loc, typeList, *blockName, instanceName, arraySizes);
8948
return;
8949
}
8950
8951
// Not a redeclaration of a built-in; check that all names are user names.
8952
reservedErrorCheck(loc, *blockName);
8953
if (instanceName)
8954
reservedErrorCheck(loc, *instanceName);
8955
for (unsigned int member = 0; member < typeList.size(); ++member)
8956
reservedErrorCheck(typeList[member].loc, typeList[member].type->getFieldName());
8957
8958
// Make default block qualification, and adjust the member qualifications
8959
8960
TQualifier defaultQualification;
8961
switch (currentBlockQualifier.storage) {
8962
case EvqUniform: defaultQualification = globalUniformDefaults; break;
8963
case EvqBuffer: defaultQualification = globalBufferDefaults; break;
8964
case EvqVaryingIn: defaultQualification = globalInputDefaults; break;
8965
case EvqVaryingOut: defaultQualification = globalOutputDefaults; break;
8966
case EvqShared: defaultQualification = globalSharedDefaults; break;
8967
default: defaultQualification.clear(); break;
8968
}
8969
8970
// Special case for "push_constant uniform", which has a default of std430,
8971
// contrary to normal uniform defaults, and can't have a default tracked for it.
8972
if ((currentBlockQualifier.isPushConstant() && !currentBlockQualifier.hasPacking()) ||
8973
(currentBlockQualifier.isShaderRecord() && !currentBlockQualifier.hasPacking()))
8974
currentBlockQualifier.layoutPacking = ElpStd430;
8975
8976
// Special case for "taskNV in/out", which has a default of std430,
8977
if (currentBlockQualifier.isTaskMemory() && !currentBlockQualifier.hasPacking())
8978
currentBlockQualifier.layoutPacking = ElpStd430;
8979
8980
// fix and check for member layout qualifiers
8981
8982
mergeObjectLayoutQualifiers(defaultQualification, currentBlockQualifier, true);
8983
8984
// "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
8985
if (currentBlockQualifier.hasAlign()) {
8986
if (defaultQualification.layoutPacking != ElpStd140 &&
8987
defaultQualification.layoutPacking != ElpStd430 &&
8988
defaultQualification.layoutPacking != ElpScalar) {
8989
error(loc, "can only be used with std140, std430, or scalar layout packing", "align", "");
8990
defaultQualification.layoutAlign = -1;
8991
}
8992
}
8993
8994
bool memberWithLocation = false;
8995
bool memberWithoutLocation = false;
8996
bool memberWithPerViewQualifier = false;
8997
for (unsigned int member = 0; member < typeList.size(); ++member) {
8998
TQualifier& memberQualifier = typeList[member].type->getQualifier();
8999
const TSourceLoc& memberLoc = typeList[member].loc;
9000
if (memberQualifier.hasStream()) {
9001
if (defaultQualification.layoutStream != memberQualifier.layoutStream)
9002
error(memberLoc, "member cannot contradict block", "stream", "");
9003
}
9004
9005
// "This includes a block's inheritance of the
9006
// current global default buffer, a block member's inheritance of the block's
9007
// buffer, and the requirement that any *xfb_buffer* declared on a block
9008
// member must match the buffer inherited from the block."
9009
if (memberQualifier.hasXfbBuffer()) {
9010
if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
9011
error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
9012
}
9013
9014
if (memberQualifier.hasPacking())
9015
error(memberLoc, "member of block cannot have a packing layout qualifier", typeList[member].type->getFieldName().c_str(), "");
9016
if (memberQualifier.hasLocation()) {
9017
const char* feature = "location on block member";
9018
switch (currentBlockQualifier.storage) {
9019
case EvqVaryingIn:
9020
case EvqVaryingOut:
9021
requireProfile(memberLoc, ECoreProfile | ECompatibilityProfile | EEsProfile, feature);
9022
profileRequires(memberLoc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
9023
profileRequires(memberLoc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
9024
memberWithLocation = true;
9025
break;
9026
default:
9027
error(memberLoc, "can only use in an in/out block", feature, "");
9028
break;
9029
}
9030
} else
9031
memberWithoutLocation = true;
9032
9033
// "The offset qualifier can only be used on block members of blocks declared with std140 or std430 layouts."
9034
// "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
9035
if (memberQualifier.hasAlign() || memberQualifier.hasOffset()) {
9036
if (defaultQualification.layoutPacking != ElpStd140 &&
9037
defaultQualification.layoutPacking != ElpStd430 &&
9038
defaultQualification.layoutPacking != ElpScalar)
9039
error(memberLoc, "can only be used with std140, std430, or scalar layout packing", "offset/align", "");
9040
}
9041
9042
if (memberQualifier.isPerView()) {
9043
memberWithPerViewQualifier = true;
9044
}
9045
9046
TQualifier newMemberQualification = defaultQualification;
9047
mergeQualifiers(memberLoc, newMemberQualification, memberQualifier, false);
9048
memberQualifier = newMemberQualification;
9049
}
9050
9051
layoutMemberLocationArrayCheck(loc, memberWithLocation, arraySizes);
9052
9053
// Ensure that the block has an XfbBuffer assigned. This is needed
9054
// because if the block has an XfbOffset assigned, then it is
9055
// assumed to have implicitly been assigned the current global
9056
// XfbBuffer, and because its members need to be assigned an
9057
// XfbOffset if they lack one.
9058
if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) {
9059
if (!currentBlockQualifier.hasXfbBuffer() && currentBlockQualifier.hasXfbOffset())
9060
currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
9061
}
9062
9063
// Process the members
9064
fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation);
9065
fixXfbOffsets(currentBlockQualifier, typeList);
9066
fixBlockUniformOffsets(currentBlockQualifier, typeList);
9067
fixBlockUniformLayoutMatrix(currentBlockQualifier, &typeList, nullptr);
9068
fixBlockUniformLayoutPacking(currentBlockQualifier, &typeList, nullptr);
9069
for (unsigned int member = 0; member < typeList.size(); ++member)
9070
layoutTypeCheck(typeList[member].loc, *typeList[member].type);
9071
9072
if (memberWithPerViewQualifier) {
9073
for (unsigned int member = 0; member < typeList.size(); ++member) {
9074
checkAndResizeMeshViewDim(typeList[member].loc, *typeList[member].type, /*isBlockMember*/ true);
9075
}
9076
}
9077
9078
// reverse merge, so that currentBlockQualifier now has all layout information
9079
// (can't use defaultQualification directly, it's missing other non-layout-default-class qualifiers)
9080
mergeObjectLayoutQualifiers(currentBlockQualifier, defaultQualification, true);
9081
9082
//
9083
// Build and add the interface block as a new type named 'blockName'
9084
//
9085
9086
TType blockType(&typeList, *blockName, currentBlockQualifier);
9087
if (arraySizes != nullptr)
9088
blockType.transferArraySizes(arraySizes);
9089
9090
if (arraySizes == nullptr)
9091
ioArrayCheck(loc, blockType, instanceName ? *instanceName : *blockName);
9092
if (currentBlockQualifier.hasBufferReference()) {
9093
9094
if (currentBlockQualifier.storage != EvqBuffer)
9095
error(loc, "can only be used with buffer", "buffer_reference", "");
9096
9097
// Create the block reference type. If it was forward-declared, detect that
9098
// as a referent struct type with no members. Replace the referent type with
9099
// blockType.
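// For illustration only (hypothetical GLSL, not part of this file), the forward-declared
// case handled here looks like:
//
//     layout(buffer_reference) buffer Node;                          // forward declaration: empty referent
//     layout(buffer_reference, std430) buffer Node { int value; };   // definition replaces the empty referent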
9100
TType blockNameType(EbtReference, blockType, *blockName);
9101
TVariable* blockNameVar = new TVariable(blockName, blockNameType, true);
9102
if (! symbolTable.insert(*blockNameVar)) {
9103
TSymbol* existingName = symbolTable.find(*blockName);
9104
if (existingName->getType().isReference() &&
9105
existingName->getType().getReferentType()->getStruct() &&
9106
existingName->getType().getReferentType()->getStruct()->size() == 0 &&
9107
existingName->getType().getQualifier().storage == blockType.getQualifier().storage) {
9108
existingName->getType().getReferentType()->deepCopy(blockType);
9109
} else {
9110
error(loc, "block name cannot be redefined", blockName->c_str(), "");
9111
}
9112
}
9113
if (!instanceName) {
9114
return;
9115
}
9116
} else {
9117
//
9118
// Don't make a user-defined type out of block name; that will cause an error
9119
// if the same block name gets reused in a different interface.
9120
//
9121
// "Block names have no other use within a shader
9122
// beyond interface matching; it is a compile-time error to use a block name at global scope for anything
9123
// other than as a block name (e.g., use of a block name for a global variable name or function name is
9124
// currently reserved)."
9125
//
9126
// Use the symbol table to prevent normal reuse of the block's name, as a variable entry,
9127
// whose type is EbtBlock, but without all the structure; that will come from the type
9128
// the instances point to.
9129
//
9130
TType blockNameType(EbtBlock, blockType.getQualifier().storage);
9131
TVariable* blockNameVar = new TVariable(blockName, blockNameType);
9132
if (! symbolTable.insert(*blockNameVar)) {
9133
TSymbol* existingName = symbolTable.find(*blockName);
9134
if (existingName->getType().getBasicType() == EbtBlock) {
9135
if (existingName->getType().getQualifier().storage == blockType.getQualifier().storage) {
9136
error(loc, "Cannot reuse block name within the same interface:", blockName->c_str(), blockType.getStorageQualifierString());
9137
return;
9138
}
9139
} else {
9140
error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
9141
return;
9142
}
9143
}
9144
}
9145
9146
// Add the variable, as anonymous or named instanceName.
9147
// Make an anonymous variable if no name was provided.
9148
if (! instanceName)
9149
instanceName = NewPoolTString("");
9150
9151
TVariable& variable = *new TVariable(instanceName, blockType);
9152
if (! symbolTable.insert(variable)) {
9153
if (*instanceName == "")
9154
error(loc, "nameless block contains a member that already has a name at global scope", blockName->c_str(), "");
9155
else
9156
error(loc, "block instance name redefinition", variable.getName().c_str(), "");
9157
9158
return;
9159
}
9160
9161
// Check for general layout qualifier errors
9162
layoutObjectCheck(loc, variable);
9163
9164
// Fix up I/O array sizes for this block instance.
9165
if (isIoResizeArray(blockType)) {
9166
ioArraySymbolResizeList.push_back(&variable);
9167
checkIoArraysConsistency(loc, true);
9168
} else
9169
fixIoArraySize(loc, variable.getWritableType());
9170
9171
// Save it in the AST for linker use.
9172
trackLinkage(variable);
9173
}
9174
9175
//
9176
// Allow the storage class of a block to be remapped at compile time.
9177
//
9178
void TParseContext::blockStorageRemap(const TSourceLoc&, const TString* instanceName, TQualifier& qualifier)
9179
{
9180
TBlockStorageClass type = intermediate.getBlockStorageOverride(instanceName->c_str());
9181
if (type != EbsNone) {
9182
qualifier.setBlockStorage(type);
9183
}
9184
}
9185
9186
// Do all block-declaration checking regarding the combination of in/out/uniform/buffer
9187
// with a particular stage.
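// For illustration (hypothetical GLSL, not part of this file), typical declarations
// validated here include:
//
//     layout(std140, binding = 0) uniform UBO  { mat4 mvp; };      // uniform block
//     layout(std430, binding = 1) buffer  SSBO { vec4 data[]; };   // buffer block
//     in Vtx { vec3 normal; } vIn;                                 // input block (rejected in vertex/compute stages)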
9188
void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& qualifier)
9189
{
9190
const char *extsrt[2] = { E_GL_NV_ray_tracing, E_GL_EXT_ray_tracing };
9191
switch (qualifier.storage) {
9192
case EvqUniform:
9193
profileRequires(loc, EEsProfile, 300, nullptr, "uniform block");
9194
profileRequires(loc, ENoProfile, 140, E_GL_ARB_uniform_buffer_object, "uniform block");
9195
if (currentBlockQualifier.layoutPacking == ElpStd430 && ! currentBlockQualifier.isPushConstant())
9196
requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "std430 requires the buffer storage qualifier");
9197
break;
9198
case EvqBuffer:
9199
requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "buffer block");
9200
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "buffer block");
9201
profileRequires(loc, EEsProfile, 310, nullptr, "buffer block");
9202
break;
9203
case EvqVaryingIn:
9204
profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "input block");
9205
// It is a compile-time error to have an input block in a vertex shader or an output block in a fragment shader
9206
// "Compute shaders do not permit user-defined input variables..."
9207
requireStage(loc, (EShLanguageMask)(EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask|
9208
EShLangFragmentMask|EShLangMeshMask), "input block");
9209
if (language == EShLangFragment) {
9210
profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "fragment input block");
9211
} else if (language == EShLangMesh && ! qualifier.isTaskMemory()) {
9212
error(loc, "input blocks cannot be used in a mesh shader", "out", "");
9213
}
9214
break;
9215
case EvqVaryingOut:
9216
profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "output block");
9217
requireStage(loc, (EShLanguageMask)(EShLangVertexMask|EShLangTessControlMask|EShLangTessEvaluationMask|
9218
EShLangGeometryMask|EShLangMeshMask|EShLangTaskMask), "output block");
9219
// ES 310 can have a block before shader_io is turned on, so skip this test for built-ins
9220
if (language == EShLangVertex && ! parsingBuiltins) {
9221
profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "vertex output block");
9222
} else if (language == EShLangMesh && qualifier.isTaskMemory()) {
9223
error(loc, "can only use on input blocks in mesh shader", "taskNV", "");
9224
} else if (language == EShLangTask && ! qualifier.isTaskMemory()) {
9225
error(loc, "output blocks cannot be used in a task shader", "out", "");
9226
}
9227
break;
9228
case EvqShared:
9229
if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4) {
9230
error(loc, "shared block requires at least SPIR-V 1.4", "shared block", "");
9231
}
9232
profileRequires(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shared_memory_block, "shared block");
9233
break;
9234
case EvqPayload:
9235
profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadNV block");
9236
requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask),
9237
"rayPayloadNV block");
9238
break;
9239
case EvqPayloadIn:
9240
profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadInNV block");
9241
requireStage(loc, (EShLanguageMask)(EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask),
9242
"rayPayloadInNV block");
9243
break;
9244
case EvqHitAttr:
9245
profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "hitAttributeNV block");
9246
requireStage(loc, (EShLanguageMask)(EShLangIntersectMask | EShLangAnyHitMask | EShLangClosestHitMask), "hitAttributeNV block");
9247
break;
9248
case EvqCallableData:
9249
profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataNV block");
9250
requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask),
9251
"callableDataNV block");
9252
break;
9253
case EvqCallableDataIn:
9254
profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataInNV block");
9255
requireStage(loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV block");
9256
break;
9257
case EvqHitObjectAttrNV:
9258
profileRequires(loc, ~EEsProfile, 460, E_GL_NV_shader_invocation_reorder, "hitObjectAttributeNV block");
9259
requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask), "hitObjectAttributeNV block");
9260
break;
9261
default:
9262
error(loc, "only uniform, buffer, in, or out blocks are supported", blockName->c_str(), "");
9263
break;
9264
}
9265
}
9266
9267
// Do all block-declaration checking regarding its qualifiers.
9268
void TParseContext::blockQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier, bool /*instanceName*/)
9269
{
9270
// The 4.5 specification says:
9271
//
9272
// interface-block :
9273
// layout-qualifieropt interface-qualifier block-name { member-list } instance-nameopt ;
9274
//
9275
// interface-qualifier :
9276
// in
9277
// out
9278
// patch in
9279
// patch out
9280
// uniform
9281
// buffer
9282
//
9283
// Note however memory qualifiers aren't included, yet the specification also says
9284
//
9285
// "...memory qualifiers may also be used in the declaration of shader storage blocks..."
9286
9287
if (qualifier.isInterpolation())
9288
error(loc, "cannot use interpolation qualifiers on an interface block", "flat/smooth/noperspective", "");
9289
if (qualifier.centroid)
9290
error(loc, "cannot use centroid qualifier on an interface block", "centroid", "");
9291
if (qualifier.isSample())
9292
error(loc, "cannot use sample qualifier on an interface block", "sample", "");
9293
if (qualifier.invariant)
9294
error(loc, "cannot use invariant qualifier on an interface block", "invariant", "");
9295
if (qualifier.isPushConstant())
9296
intermediate.addPushConstantCount();
9297
if (qualifier.isShaderRecord())
9298
intermediate.addShaderRecordCount();
9299
if (qualifier.isTaskMemory())
9300
intermediate.addTaskNVCount();
9301
}
9302
9303
//
9304
// "For a block, this process applies to the entire block, or until the first member
9305
// is reached that has a location layout qualifier. When a block member is declared with a location
9306
// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level
9307
// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
9308
// until the next member declared with a location qualifier. The values used for locations do not have to be
9309
// declared in increasing order."
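// A minimal sketch of the rule above (hypothetical GLSL, names made up):
//
//     layout(location = 3) out Blk {
//         vec4 a;                         // assigned location 3 (from the block)
//         layout(location = 7) vec4 b;    // explicit: location 7
//         mat4 c;                         // continues after b: locations 8..11
//     } blk;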
9310
void TParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation)
9311
{
9312
// "If a block has no block-level location layout qualifier, it is required that either all or none of its members
9313
// have a location layout qualifier, or a compile-time error results."
9314
if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation)
9315
error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", "");
9316
else {
9317
if (memberWithLocation) {
9318
// remove any block-level location and make it per *every* member
9319
int nextLocation = 0; // by the rule above, initial value is not relevant
9320
if (qualifier.hasAnyLocation()) {
9321
nextLocation = qualifier.layoutLocation;
9322
qualifier.layoutLocation = TQualifier::layoutLocationEnd;
9323
if (qualifier.hasComponent()) {
9324
// "It is a compile-time error to apply the *component* qualifier to a ... block"
9325
error(loc, "cannot apply to a block", "component", "");
9326
}
9327
if (qualifier.hasIndex()) {
9328
error(loc, "cannot apply to a block", "index", "");
9329
}
9330
}
9331
for (unsigned int member = 0; member < typeList.size(); ++member) {
9332
TQualifier& memberQualifier = typeList[member].type->getQualifier();
9333
const TSourceLoc& memberLoc = typeList[member].loc;
9334
if (! memberQualifier.hasLocation()) {
9335
if (nextLocation >= (int)TQualifier::layoutLocationEnd)
9336
error(memberLoc, "location is too large", "location", "");
9337
memberQualifier.layoutLocation = nextLocation;
9338
memberQualifier.layoutComponent = TQualifier::layoutComponentEnd;
9339
}
9340
nextLocation = memberQualifier.layoutLocation + intermediate.computeTypeLocationSize(
9341
*typeList[member].type, language);
9342
}
9343
}
9344
}
9345
}
9346
9347
void TParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
9348
{
9349
// "If a block is qualified with xfb_offset, all its
9350
// members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
9351
// members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
9352
// offsets."
9353
9354
if (! qualifier.hasXfbBuffer() || ! qualifier.hasXfbOffset())
9355
return;
9356
9357
int nextOffset = qualifier.layoutXfbOffset;
9358
for (unsigned int member = 0; member < typeList.size(); ++member) {
9359
TQualifier& memberQualifier = typeList[member].type->getQualifier();
9360
bool contains64BitType = false;
9361
bool contains32BitType = false;
9362
bool contains16BitType = false;
9363
int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType);
9364
// see if we need to auto-assign an offset to this member
9365
if (! memberQualifier.hasXfbOffset()) {
9366
// "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8"
9367
if (contains64BitType)
9368
RoundToPow2(nextOffset, 8);
9369
else if (contains32BitType)
9370
RoundToPow2(nextOffset, 4);
9371
else if (contains16BitType)
9372
RoundToPow2(nextOffset, 2);
9373
memberQualifier.layoutXfbOffset = nextOffset;
9374
} else
9375
nextOffset = memberQualifier.layoutXfbOffset;
9376
nextOffset += memberSize;
9377
}
9378
9379
// The above gave all block members an offset, so we can take it off the block now,
9380
// which will avoid double counting the offset usage.
9381
qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd;
9382
}
9383
9384
// Calculate and save the offset of each block member, using the recursively
9385
// defined block offset rules and the user-provided offset and align.
9386
//
9387
// Also, compute and save the total size of the block. For the block's size, arrayness
9388
// is not taken into account, as each element is backed by a separate buffer.
9389
//
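// A minimal sketch (hypothetical GLSL) of the std140 offsets computed below:
//
//     layout(std140) uniform UBO {
//         float f;                       // offset 0  (size 4)
//         vec3  v;                       // aligned to 16 -> offset 16 (size 12)
//         layout(offset = 48) mat4 m;    // explicit offset; must be a multiple of the base alignment (16)
//     };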
9390
void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typeList)
9391
{
9392
if (!storageCanHaveLayoutInBlock(qualifier.storage) && !qualifier.isTaskMemory())
9393
return;
9394
if (qualifier.layoutPacking != ElpStd140 && qualifier.layoutPacking != ElpStd430 && qualifier.layoutPacking != ElpScalar)
9395
return;
9396
9397
int offset = 0;
9398
int memberSize;
9399
for (unsigned int member = 0; member < typeList.size(); ++member) {
9400
TQualifier& memberQualifier = typeList[member].type->getQualifier();
9401
const TSourceLoc& memberLoc = typeList[member].loc;
9402
9403
// "When align is applied to an array, it effects only the start of the array, not the array's internal stride."
9404
9405
// modify just the children's view of matrix layout, if there is one for this member
9406
TLayoutMatrix subMatrixLayout = typeList[member].type->getQualifier().layoutMatrix;
9407
int dummyStride;
9408
int memberAlignment = intermediate.getMemberAlignment(*typeList[member].type, memberSize, dummyStride, qualifier.layoutPacking,
9409
subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : qualifier.layoutMatrix == ElmRowMajor);
9410
if (memberQualifier.hasOffset()) {
9411
// "The specified offset must be a multiple
9412
// of the base alignment of the type of the block member it qualifies, or a compile-time error results."
9413
if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment))
9414
error(memberLoc, "must be a multiple of the member's alignment", "offset",
9415
"(layout offset = %d | member alignment = %d)", memberQualifier.layoutOffset, memberAlignment);
9416
9417
// GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous
9418
// member in the block or that lies within the previous member of the block"
9419
if (spvVersion.spv == 0) {
9420
if (memberQualifier.layoutOffset < offset)
9421
error(memberLoc, "cannot lie in previous members", "offset", "");
9422
9423
// "The offset qualifier forces the qualified member to start at or after the specified
9424
// integral-constant expression, which will be its byte offset from the beginning of the buffer.
9425
// "The actual offset of a member is computed as
9426
// follows: If offset was declared, start with that offset, otherwise start with the next available offset."
9427
offset = std::max(offset, memberQualifier.layoutOffset);
9428
} else {
9429
// TODO: Vulkan: "It is a compile-time error to have any offset, explicit or assigned,
9430
// that lies within another member of the block."
9431
9432
offset = memberQualifier.layoutOffset;
9433
}
9434
}
9435
9436
// "The actual alignment of a member will be the greater of the specified align alignment and the standard
9437
// (e.g., std140) base alignment for the member's type."
9438
if (memberQualifier.hasAlign())
9439
memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign);
9440
9441
// "If the resulting offset is not a multiple of the actual alignment,
9442
// increase it to the first offset that is a multiple of
9443
// the actual alignment."
9444
RoundToPow2(offset, memberAlignment);
9445
typeList[member].type->getQualifier().layoutOffset = offset;
9446
offset += memberSize;
9447
}
9448
}
9449
9450
//
9451
// Spread LayoutMatrix to uniform block members. If a uniform block member is a struct,
// spread LayoutMatrix to that struct's members as well, applying this rule recursively.
9453
//
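// For illustration (hypothetical GLSL): given
//
//     struct S { mat3 n; };
//     layout(row_major) uniform UBO {
//         mat4 m;    // inherits row_major from the block
//         S    s;    // s.n also becomes row_major, applied recursively
//     };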
9454
void TParseContext::fixBlockUniformLayoutMatrix(TQualifier& qualifier, TTypeList* originTypeList,
9455
TTypeList* tmpTypeList)
9456
{
9457
assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
9458
for (unsigned int member = 0; member < originTypeList->size(); ++member) {
9459
if (qualifier.layoutPacking != ElpNone) {
9460
if (tmpTypeList == nullptr) {
9461
if (((*originTypeList)[member].type->isMatrix() ||
9462
(*originTypeList)[member].type->getBasicType() == EbtStruct) &&
9463
(*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
9464
(*originTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
9465
}
9466
} else {
9467
if (((*tmpTypeList)[member].type->isMatrix() ||
9468
(*tmpTypeList)[member].type->getBasicType() == EbtStruct) &&
9469
(*tmpTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
9470
(*tmpTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
9471
}
9472
}
9473
}
9474
9475
if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
9476
TQualifier* memberQualifier = nullptr;
9477
// A block member can declare its own matrix layout; in that case, use the member's layout rather than the block's.
9478
if ((*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
9479
memberQualifier = &qualifier;
9480
} else {
9481
memberQualifier = &((*originTypeList)[member].type->getQualifier());
9482
}
9483
9484
const TType* tmpType = tmpTypeList == nullptr ?
9485
(*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
9486
9487
fixBlockUniformLayoutMatrix(*memberQualifier, (*originTypeList)[member].type->getWritableStruct(),
9488
tmpType->getWritableStruct());
9489
9490
const TTypeList* structure = recordStructCopy(matrixFixRecord, (*originTypeList)[member].type, tmpType);
9491
9492
if (tmpTypeList == nullptr) {
9493
(*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
9494
}
9495
if (tmpTypeList != nullptr) {
9496
(*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
9497
}
9498
}
9499
}
9500
}
9501
9502
//
9503
// Spread LayoutPacking to matrix or aggregate block members. If a block member is a struct or
9504
// array of struct, spread LayoutPacking recursively to its matrix or aggregate members.
9505
//
9506
void TParseContext::fixBlockUniformLayoutPacking(TQualifier& qualifier, TTypeList* originTypeList,
9507
TTypeList* tmpTypeList)
9508
{
9509
assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
9510
for (unsigned int member = 0; member < originTypeList->size(); ++member) {
9511
if (qualifier.layoutPacking != ElpNone) {
9512
if (tmpTypeList == nullptr) {
9513
if ((*originTypeList)[member].type->getQualifier().layoutPacking == ElpNone &&
9514
!(*originTypeList)[member].type->isScalarOrVector()) {
9515
(*originTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
9516
}
9517
} else {
9518
if ((*tmpTypeList)[member].type->getQualifier().layoutPacking == ElpNone &&
9519
!(*tmpTypeList)[member].type->isScalarOrVector()) {
9520
(*tmpTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
9521
}
9522
}
9523
}
9524
9525
if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
9526
// Deep copy the type in the pool.
// A struct used in different blocks may have different layout qualifiers,
// so allocate a new object to distinguish between them.
9529
const TType* tmpType = tmpTypeList == nullptr ?
9530
(*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
9531
9532
fixBlockUniformLayoutPacking(qualifier, (*originTypeList)[member].type->getWritableStruct(),
9533
tmpType->getWritableStruct());
9534
9535
const TTypeList* structure = recordStructCopy(packingFixRecord, (*originTypeList)[member].type, tmpType);
9536
9537
if (tmpTypeList == nullptr) {
9538
(*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
9539
}
9540
if (tmpTypeList != nullptr) {
9541
(*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
9542
}
9543
}
9544
}
9545
}
9546
9547
// For an identifier that is already declared, add more qualification to it.
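// For illustration (hypothetical GLSL), this handles statements such as:
//
//     invariant gl_Position;   // re-qualify a built-in output
//     out vec4 color;
//     precise   color;         // add 'precise' to an existing user declaration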
9548
void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
9549
{
9550
TSymbol* symbol = symbolTable.find(identifier);
9551
9552
// A forward declaration of a block reference looks to the grammar like adding
9553
// a qualifier to an existing symbol. Detect this and create the block reference
9554
// type with an empty type list, which will be filled in later in
9555
// TParseContext::declareBlock.
9556
if (!symbol && qualifier.hasBufferReference()) {
9557
TTypeList typeList;
9558
TType blockType(&typeList, identifier, qualifier);
9559
TType blockNameType(EbtReference, blockType, identifier);
9560
TVariable* blockNameVar = new TVariable(&identifier, blockNameType, true);
9561
if (! symbolTable.insert(*blockNameVar)) {
9562
error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
9563
}
9564
return;
9565
}
9566
9567
if (! symbol) {
9568
error(loc, "identifier not previously declared", identifier.c_str(), "");
9569
return;
9570
}
9571
if (symbol->getAsFunction()) {
9572
error(loc, "cannot re-qualify a function name", identifier.c_str(), "");
9573
return;
9574
}
9575
9576
if (qualifier.isAuxiliary() ||
9577
qualifier.isMemory() ||
9578
qualifier.isInterpolation() ||
9579
qualifier.hasLayout() ||
9580
qualifier.storage != EvqTemporary ||
9581
qualifier.precision != EpqNone) {
9582
error(loc, "cannot add storage, auxiliary, memory, interpolation, layout, or precision qualifier to an existing variable", identifier.c_str(), "");
9583
return;
9584
}
9585
9586
// For read-only built-ins, add a new symbol for holding the modified qualifier.
9587
// This will bring up an entire block, if a block type has to be modified (e.g., gl_Position inside a block)
9588
if (symbol->isReadOnly())
9589
symbol = symbolTable.copyUp(symbol);
9590
9591
if (qualifier.invariant) {
9592
if (intermediate.inIoAccessed(identifier))
9593
error(loc, "cannot change qualification after use", "invariant", "");
9594
symbol->getWritableType().getQualifier().invariant = true;
9595
invariantCheck(loc, symbol->getType().getQualifier());
9596
} else if (qualifier.isNoContraction()) {
9597
if (intermediate.inIoAccessed(identifier))
9598
error(loc, "cannot change qualification after use", "precise", "");
9599
symbol->getWritableType().getQualifier().setNoContraction();
9600
} else if (qualifier.specConstant) {
9601
symbol->getWritableType().getQualifier().makeSpecConstant();
9602
if (qualifier.hasSpecConstantId())
9603
symbol->getWritableType().getQualifier().layoutSpecConstantId = qualifier.layoutSpecConstantId;
9604
} else
9605
warn(loc, "unknown requalification", "", "");
9606
}
9607
9608
void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, TIdentifierList& identifiers)
9609
{
9610
for (unsigned int i = 0; i < identifiers.size(); ++i)
9611
addQualifierToExisting(loc, qualifier, *identifiers[i]);
9612
}
9613
9614
// Make sure 'invariant' isn't being applied to a non-allowed object.
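// For illustration (hypothetical GLSL):
//
//     invariant gl_Position;    // accepted: applies to an output
//     invariant in vec3 nrm;    // rejected on ES 300+ / desktop 420+: only outputs may be invariant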
9615
void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qualifier)
9616
{
9617
if (! qualifier.invariant)
9618
return;
9619
9620
bool pipeOut = qualifier.isPipeOutput();
9621
bool pipeIn = qualifier.isPipeInput();
9622
if ((version >= 300 && isEsProfile()) || (!isEsProfile() && version >= 420)) {
9623
if (! pipeOut)
9624
error(loc, "can only apply to an output", "invariant", "");
9625
} else {
9626
if ((language == EShLangVertex && pipeIn) || (! pipeOut && ! pipeIn))
9627
error(loc, "can only apply to an output, or to an input in a non-vertex stage\n", "invariant", "");
9628
}
9629
}
9630
9631
//
9632
// Updating default qualifier for the case of a declaration with just a qualifier,
9633
// no type, block, or identifier.
9634
//
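// For illustration (hypothetical GLSL), the standalone declarations handled here include:
//
//     layout(triangles, invocations = 2) in;            // geometry shader input layout
//     layout(max_vertices = 3) out;                     // geometry shader output vertex count
//     layout(local_size_x = 8, local_size_y = 8) in;    // compute workgroup size
//     layout(std430) buffer;                            // new default packing for buffer blocks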
9635
void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, const TPublicType& publicType)
9636
{
9637
if (publicType.shaderQualifiers.vertices != TQualifier::layoutNotSet) {
9638
assert(language == EShLangTessControl || language == EShLangGeometry || language == EShLangMesh);
9639
const char* id = (language == EShLangTessControl) ? "vertices" : "max_vertices";
9640
9641
if (publicType.qualifier.storage != EvqVaryingOut)
9642
error(loc, "can only apply to 'out'", id, "");
9643
if (! intermediate.setVertices(publicType.shaderQualifiers.vertices))
9644
error(loc, "cannot change previously set layout value", id, "");
9645
9646
if (language == EShLangTessControl)
9647
checkIoArraysConsistency(loc);
9648
}
9649
if (publicType.shaderQualifiers.primitives != TQualifier::layoutNotSet) {
9650
assert(language == EShLangMesh);
9651
const char* id = "max_primitives";
9652
9653
if (publicType.qualifier.storage != EvqVaryingOut)
9654
error(loc, "can only apply to 'out'", id, "");
9655
if (! intermediate.setPrimitives(publicType.shaderQualifiers.primitives))
9656
error(loc, "cannot change previously set layout value", id, "");
9657
}
9658
if (publicType.shaderQualifiers.invocations != TQualifier::layoutNotSet) {
9659
if (publicType.qualifier.storage != EvqVaryingIn)
9660
error(loc, "can only apply to 'in'", "invocations", "");
9661
if (! intermediate.setInvocations(publicType.shaderQualifiers.invocations))
9662
error(loc, "cannot change previously set layout value", "invocations", "");
9663
}
9664
if (publicType.shaderQualifiers.geometry != ElgNone) {
9665
if (publicType.qualifier.storage == EvqVaryingIn) {
9666
switch (publicType.shaderQualifiers.geometry) {
9667
case ElgPoints:
9668
case ElgLines:
9669
case ElgLinesAdjacency:
9670
case ElgTriangles:
9671
case ElgTrianglesAdjacency:
9672
case ElgQuads:
9673
case ElgIsolines:
9674
if (language == EShLangMesh) {
9675
error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
9676
break;
9677
}
9678
if (intermediate.setInputPrimitive(publicType.shaderQualifiers.geometry)) {
9679
if (language == EShLangGeometry)
9680
checkIoArraysConsistency(loc);
9681
} else
9682
error(loc, "cannot change previously set input primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
9683
break;
9684
default:
9685
error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
9686
}
9687
} else if (publicType.qualifier.storage == EvqVaryingOut) {
9688
switch (publicType.shaderQualifiers.geometry) {
9689
case ElgLines:
9690
case ElgTriangles:
9691
if (language != EShLangMesh) {
9692
error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
9693
break;
9694
}
9695
[[fallthrough]];
9696
case ElgPoints:
9697
case ElgLineStrip:
9698
case ElgTriangleStrip:
9699
if (! intermediate.setOutputPrimitive(publicType.shaderQualifiers.geometry))
9700
error(loc, "cannot change previously set output primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
9701
break;
9702
default:
9703
error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
9704
}
9705
} else
9706
error(loc, "cannot apply to:", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), GetStorageQualifierString(publicType.qualifier.storage));
9707
}
9708
if (publicType.shaderQualifiers.spacing != EvsNone) {
9709
if (publicType.qualifier.storage == EvqVaryingIn) {
9710
if (! intermediate.setVertexSpacing(publicType.shaderQualifiers.spacing))
9711
error(loc, "cannot change previously set vertex spacing", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), "");
9712
} else
9713
error(loc, "can only apply to 'in'", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), "");
9714
}
9715
if (publicType.shaderQualifiers.order != EvoNone) {
9716
if (publicType.qualifier.storage == EvqVaryingIn) {
9717
if (! intermediate.setVertexOrder(publicType.shaderQualifiers.order))
9718
error(loc, "cannot change previously set vertex order", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), "");
9719
} else
9720
error(loc, "can only apply to 'in'", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), "");
9721
}
9722
if (publicType.shaderQualifiers.pointMode) {
9723
if (publicType.qualifier.storage == EvqVaryingIn)
9724
intermediate.setPointMode();
9725
else
9726
error(loc, "can only apply to 'in'", "point_mode", "");
9727
}
9728
9729
for (int i = 0; i < 3; ++i) {
9730
if (publicType.shaderQualifiers.localSizeNotDefault[i]) {
9731
if (publicType.qualifier.storage == EvqVaryingIn) {
9732
if (! intermediate.setLocalSize(i, publicType.shaderQualifiers.localSize[i]))
9733
error(loc, "cannot change previously set size", "local_size", "");
9734
else {
9735
int max = 0;
9736
if (language == EShLangCompute) {
9737
switch (i) {
9738
case 0: max = resources.maxComputeWorkGroupSizeX; break;
9739
case 1: max = resources.maxComputeWorkGroupSizeY; break;
9740
case 2: max = resources.maxComputeWorkGroupSizeZ; break;
9741
default: break;
9742
}
9743
if (intermediate.getLocalSize(i) > (unsigned int)max)
9744
error(loc, "too large; see gl_MaxComputeWorkGroupSize", "local_size", "");
9745
} else if (language == EShLangMesh) {
9746
switch (i) {
9747
case 0:
9748
max = extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9749
resources.maxMeshWorkGroupSizeX_EXT :
9750
resources.maxMeshWorkGroupSizeX_NV;
9751
break;
9752
case 1:
9753
max = extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9754
resources.maxMeshWorkGroupSizeY_EXT :
9755
resources.maxMeshWorkGroupSizeY_NV ;
9756
break;
9757
case 2:
9758
max = extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9759
resources.maxMeshWorkGroupSizeZ_EXT :
9760
resources.maxMeshWorkGroupSizeZ_NV ;
9761
break;
9762
default: break;
9763
}
9764
if (intermediate.getLocalSize(i) > (unsigned int)max) {
9765
TString maxsErrtring = "too large, see ";
9766
maxsErrtring.append(extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9767
"gl_MaxMeshWorkGroupSizeEXT" : "gl_MaxMeshWorkGroupSizeNV");
9768
error(loc, maxsErrtring.c_str(), "local_size", "");
9769
}
9770
} else if (language == EShLangTask) {
9771
switch (i) {
9772
case 0:
9773
max = extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9774
resources.maxTaskWorkGroupSizeX_EXT :
9775
resources.maxTaskWorkGroupSizeX_NV;
9776
break;
9777
case 1:
9778
max = extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9779
resources.maxTaskWorkGroupSizeY_EXT:
9780
resources.maxTaskWorkGroupSizeY_NV;
9781
break;
9782
case 2:
9783
max = extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9784
resources.maxTaskWorkGroupSizeZ_EXT:
9785
resources.maxTaskWorkGroupSizeZ_NV;
9786
break;
9787
default: break;
9788
}
9789
if (intermediate.getLocalSize(i) > (unsigned int)max) {
9790
TString maxsErrtring = "too large, see ";
9791
maxsErrtring.append(extensionTurnedOn(E_GL_EXT_mesh_shader) ?
9792
"gl_MaxTaskWorkGroupSizeEXT" : "gl_MaxTaskWorkGroupSizeNV");
9793
error(loc, maxsErrtring.c_str(), "local_size", "");
9794
}
9795
} else {
9796
assert(0);
9797
}
9798
9799
// Fix the existing constant gl_WorkGroupSize with this new information.
9800
TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
9801
if (workGroupSize != nullptr)
9802
workGroupSize->getWritableConstArray()[i].setUConst(intermediate.getLocalSize(i));
9803
}
9804
} else
9805
error(loc, "can only apply to 'in'", "local_size", "");
9806
}
9807
if (publicType.shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) {
9808
if (publicType.qualifier.storage == EvqVaryingIn) {
9809
if (! intermediate.setLocalSizeSpecId(i, publicType.shaderQualifiers.localSizeSpecId[i]))
9810
error(loc, "cannot change previously set size", "local_size", "");
9811
} else
9812
error(loc, "can only apply to 'in'", "local_size id", "");
9813
// Set the workgroup built-in variable as a specialization constant
9814
TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
9815
if (workGroupSize != nullptr)
9816
workGroupSize->getWritableType().getQualifier().specConstant = true;
9817
}
9818
}
9819
9820
if (publicType.shaderQualifiers.earlyFragmentTests) {
9821
if (publicType.qualifier.storage == EvqVaryingIn)
9822
intermediate.setEarlyFragmentTests();
9823
else
9824
error(loc, "can only apply to 'in'", "early_fragment_tests", "");
9825
}
9826
if (publicType.shaderQualifiers.earlyAndLateFragmentTestsAMD) {
9827
if (publicType.qualifier.storage == EvqVaryingIn)
9828
intermediate.setEarlyAndLateFragmentTestsAMD();
9829
else
9830
error(loc, "can only apply to 'in'", "early_and_late_fragment_tests_amd", "");
9831
}
9832
if (publicType.shaderQualifiers.postDepthCoverage) {
9833
if (publicType.qualifier.storage == EvqVaryingIn)
9834
intermediate.setPostDepthCoverage();
9835
else
9836
error(loc, "can only apply to 'in'", "post_coverage_coverage", "");
9837
}
9838
if (publicType.shaderQualifiers.nonCoherentColorAttachmentReadEXT) {
9839
if (publicType.qualifier.storage == EvqVaryingIn)
9840
intermediate.setNonCoherentColorAttachmentReadEXT();
9841
else
9842
error(loc, "can only apply to 'in'", "non_coherent_color_attachment_readEXT", "");
9843
}
9844
if (publicType.shaderQualifiers.nonCoherentDepthAttachmentReadEXT) {
9845
if (publicType.qualifier.storage == EvqVaryingIn)
9846
intermediate.setNonCoherentDepthAttachmentReadEXT();
9847
else
9848
error(loc, "can only apply to 'in'", "non_coherent_depth_attachment_readEXT", "");
9849
}
9850
if (publicType.shaderQualifiers.nonCoherentStencilAttachmentReadEXT) {
9851
if (publicType.qualifier.storage == EvqVaryingIn)
9852
intermediate.setNonCoherentStencilAttachmentReadEXT();
9853
else
9854
error(loc, "can only apply to 'in'", "non_coherent_stencil_attachment_readEXT", "");
9855
}
9856
if (publicType.shaderQualifiers.hasBlendEquation()) {
9857
if (publicType.qualifier.storage != EvqVaryingOut)
9858
error(loc, "can only apply to 'out'", "blend equation", "");
9859
}
9860
if (publicType.shaderQualifiers.interlockOrdering) {
9861
if (publicType.qualifier.storage == EvqVaryingIn) {
9862
if (!intermediate.setInterlockOrdering(publicType.shaderQualifiers.interlockOrdering))
9863
error(loc, "cannot change previously set fragment shader interlock ordering", TQualifier::getInterlockOrderingString(publicType.shaderQualifiers.interlockOrdering), "");
9864
}
9865
else
9866
error(loc, "can only apply to 'in'", TQualifier::getInterlockOrderingString(publicType.shaderQualifiers.interlockOrdering), "");
9867
}
9868
9869
if (publicType.shaderQualifiers.layoutDerivativeGroupQuads &&
9870
publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
9871
error(loc, "cannot be both specified", "derivative_group_quadsNV and derivative_group_linearNV", "");
9872
}
9873
9874
if (publicType.shaderQualifiers.layoutDerivativeGroupQuads) {
9875
if (publicType.qualifier.storage == EvqVaryingIn) {
9876
if ((intermediate.getLocalSize(0) & 1) ||
9877
(intermediate.getLocalSize(1) & 1))
9878
error(loc, "requires local_size_x and local_size_y to be multiple of two", "derivative_group_quadsNV", "");
9879
else
9880
intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupQuads);
9881
}
9882
else
9883
error(loc, "can only apply to 'in'", "derivative_group_quadsNV", "");
9884
}
9885
if (publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
9886
if (publicType.qualifier.storage == EvqVaryingIn) {
9887
if((intermediate.getLocalSize(0) *
9888
intermediate.getLocalSize(1) *
9889
intermediate.getLocalSize(2)) % 4 != 0)
9890
error(loc, "requires total group size to be multiple of four", "derivative_group_linearNV", "");
9891
else
9892
intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupLinear);
9893
}
9894
else
9895
error(loc, "can only apply to 'in'", "derivative_group_linearNV", "");
9896
}
9897
// Check mesh out array sizes, once all the necessary out qualifiers are defined.
9898
if ((language == EShLangMesh) &&
9899
(intermediate.getVertices() != TQualifier::layoutNotSet) &&
9900
(intermediate.getPrimitives() != TQualifier::layoutNotSet) &&
9901
(intermediate.getOutputPrimitive() != ElgNone))
9902
{
9903
checkIoArraysConsistency(loc);
9904
}
9905
9906
if (publicType.shaderQualifiers.layoutPrimitiveCulling) {
9907
if (publicType.qualifier.storage != EvqTemporary)
9908
error(loc, "layout qualifier can not have storage qualifiers", "primitive_culling","", "");
9909
else {
9910
intermediate.setLayoutPrimitiveCulling();
9911
}
9912
// Exit early as further checks are not valid
9913
return;
9914
}
9915
9916
const TQualifier& qualifier = publicType.qualifier;
9917
9918
if (qualifier.isAuxiliary() ||
9919
qualifier.isMemory() ||
9920
qualifier.isInterpolation() ||
9921
qualifier.precision != EpqNone)
9922
error(loc, "cannot use auxiliary, memory, interpolation, or precision qualifier in a default qualifier declaration (declaration with no type)", "qualifier", "");
9923
9924
// "The offset qualifier can only be used on block members of blocks..."
9925
// "The align qualifier can only be used on blocks or block members..."
9926
if (qualifier.hasOffset() ||
9927
qualifier.hasAlign())
9928
error(loc, "cannot use offset or align qualifiers in a default qualifier declaration (declaration with no type)", "layout qualifier", "");
9929
9930
layoutQualifierCheck(loc, qualifier);
9931
9932
switch (qualifier.storage) {
9933
case EvqUniform:
9934
if (qualifier.hasMatrix())
9935
globalUniformDefaults.layoutMatrix = qualifier.layoutMatrix;
9936
if (qualifier.hasPacking())
9937
globalUniformDefaults.layoutPacking = qualifier.layoutPacking;
9938
break;
9939
case EvqBuffer:
9940
if (qualifier.hasMatrix())
9941
globalBufferDefaults.layoutMatrix = qualifier.layoutMatrix;
9942
if (qualifier.hasPacking())
9943
globalBufferDefaults.layoutPacking = qualifier.layoutPacking;
9944
break;
9945
case EvqVaryingIn:
9946
break;
9947
case EvqVaryingOut:
9948
if (qualifier.hasStream())
9949
globalOutputDefaults.layoutStream = qualifier.layoutStream;
9950
if (qualifier.hasXfbBuffer())
9951
globalOutputDefaults.layoutXfbBuffer = qualifier.layoutXfbBuffer;
9952
if (globalOutputDefaults.hasXfbBuffer() && qualifier.hasXfbStride()) {
9953
if (! intermediate.setXfbBufferStride(globalOutputDefaults.layoutXfbBuffer, qualifier.layoutXfbStride))
9954
error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
9955
}
9956
break;
9957
case EvqShared:
9958
if (qualifier.hasMatrix())
9959
globalSharedDefaults.layoutMatrix = qualifier.layoutMatrix;
9960
if (qualifier.hasPacking())
9961
globalSharedDefaults.layoutPacking = qualifier.layoutPacking;
9962
break;
9963
default:
9964
error(loc, "default qualifier requires 'uniform', 'buffer', 'in', 'out' or 'shared' storage qualification", "", "");
9965
return;
9966
}
9967
9968
if (qualifier.hasBinding())
9969
error(loc, "cannot declare a default, include a type or full declaration", "binding", "");
9970
if (qualifier.hasAnyLocation())
9971
error(loc, "cannot declare a default, use a full declaration", "location/component/index", "");
9972
if (qualifier.hasXfbOffset())
9973
error(loc, "cannot declare a default, use a full declaration", "xfb_offset", "");
9974
if (qualifier.isPushConstant())
9975
error(loc, "cannot declare a default, can only be used on a block", "push_constant", "");
9976
if (qualifier.hasBufferReference())
9977
error(loc, "cannot declare a default, can only be used on a block", "buffer_reference", "");
9978
if (qualifier.hasSpecConstantId())
9979
error(loc, "cannot declare a default, can only be used on a scalar", "constant_id", "");
9980
if (qualifier.isShaderRecord())
9981
error(loc, "cannot declare a default, can only be used on a block", "shaderRecordNV", "");
9982
}
9983
9984
//
9985
// Take the sequence of statements that has been built up since the last case/default,
9986
// put it on the list of top-level nodes for the current (inner-most) switch statement,
9987
// and follow that by the case/default we are on now. (See switch topology comment on
9988
// TIntermSwitch.)
9989
//
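// For illustration (hypothetical GLSL): in
//
//     switch (i) {
//     case 0: a = 1; break;   // 'a = 1; break;' is wrapped into an EOpSequence and pushed
//     default: a = 2;         // just before the next case/default branch node
//     }
//
// duplicate labels (two 'default's, or two cases with the same constant value) are reported here.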
9990
void TParseContext::wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode)
9991
{
9992
TIntermSequence* switchSequence = switchSequenceStack.back();
9993
9994
if (statements) {
9995
if (switchSequence->size() == 0)
9996
error(statements->getLoc(), "cannot have statements before first case/default label", "switch", "");
9997
statements->setOperator(EOpSequence);
9998
switchSequence->push_back(statements);
9999
}
10000
if (branchNode) {
10001
// check all previous cases for the same label (or both are 'default')
10002
for (unsigned int s = 0; s < switchSequence->size(); ++s) {
10003
TIntermBranch* prevBranch = (*switchSequence)[s]->getAsBranchNode();
10004
if (prevBranch) {
10005
TIntermTyped* prevExpression = prevBranch->getExpression();
10006
TIntermTyped* newExpression = branchNode->getAsBranchNode()->getExpression();
10007
if (prevExpression == nullptr && newExpression == nullptr)
10008
error(branchNode->getLoc(), "duplicate label", "default", "");
10009
else if (prevExpression != nullptr &&
10010
newExpression != nullptr &&
10011
prevExpression->getAsConstantUnion() &&
10012
newExpression->getAsConstantUnion() &&
10013
prevExpression->getAsConstantUnion()->getConstArray()[0].getIConst() ==
10014
newExpression->getAsConstantUnion()->getConstArray()[0].getIConst())
10015
error(branchNode->getLoc(), "duplicated value", "case", "");
10016
}
10017
}
10018
switchSequence->push_back(branchNode);
10019
}
10020
}
10021
10022
//
10023
// Turn the top-level node sequence built up by wrapupSwitchSubsequence()
10024
// into a switch node.
10025
//
10026
TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expression, TIntermAggregate* lastStatements)
10027
{
10028
profileRequires(loc, EEsProfile, 300, nullptr, "switch statements");
10029
profileRequires(loc, ENoProfile, 130, nullptr, "switch statements");
10030
10031
wrapupSwitchSubsequence(lastStatements, nullptr);
10032
10033
if (expression == nullptr ||
10034
(expression->getBasicType() != EbtInt && expression->getBasicType() != EbtUint) ||
10035
expression->getType().isArray() || expression->getType().isMatrix() || expression->getType().isVector())
10036
error(loc, "condition must be a scalar integer expression", "switch", "");
10037
10038
// If there is nothing to do, drop the switch but still execute the expression
10039
TIntermSequence* switchSequence = switchSequenceStack.back();
10040
if (switchSequence->size() == 0)
10041
return expression;
10042
10043
if (lastStatements == nullptr) {
10044
// This was originally an ERROR, because early versions of the specification said
// "it is an error to have no statement between a label and the end of the switch statement."
// The specifications were updated to remove this (it being ill-defined what a "statement" was),
// so this became a warning. However, 3.0 tests still check for the error.
10048
if (isEsProfile() && (version <= 300 || version >= 320) && ! relaxedErrors())
10049
error(loc, "last case/default label not followed by statements", "switch", "");
10050
else if (!isEsProfile() && (version <= 430 || version >= 460))
10051
error(loc, "last case/default label not followed by statements", "switch", "");
10052
else
10053
warn(loc, "last case/default label not followed by statements", "switch", "");
10054
10055
10056
// emulate a break for error recovery
10057
lastStatements = intermediate.makeAggregate(intermediate.addBranch(EOpBreak, loc));
10058
lastStatements->setOperator(EOpSequence);
10059
switchSequence->push_back(lastStatements);
10060
}
10061
10062
TIntermAggregate* body = new TIntermAggregate(EOpSequence);
10063
body->getSequence() = *switchSequenceStack.back();
10064
body->setLoc(loc);
10065
10066
TIntermSwitch* switchNode = new TIntermSwitch(expression, body);
10067
switchNode->setLoc(loc);
10068
10069
return switchNode;
10070
}
10071
10072
//
10073
// When a struct is used in a block and ends up with its own layout packing or layout matrix,
// record the original structure in the map, and record the structure copy in the copy table.
// The per-member hash of (layoutPacking, layoutMatrix) below decides whether a distinct copy is needed.
10075
//
10076
const TTypeList* TParseContext::recordStructCopy(TStructRecord& record, const TType* originType, const TType* tmpType)
10077
{
10078
size_t memberCount = tmpType->getStruct()->size();
10079
size_t originHash = 0, tmpHash = 0;
10080
std::hash<size_t> hasher;
10081
for (size_t i = 0; i < memberCount; i++) {
10082
size_t originMemberHash = hasher(originType->getStruct()->at(i).type->getQualifier().layoutPacking +
10083
originType->getStruct()->at(i).type->getQualifier().layoutMatrix);
10084
size_t tmpMemberHash = hasher(tmpType->getStruct()->at(i).type->getQualifier().layoutPacking +
10085
tmpType->getStruct()->at(i).type->getQualifier().layoutMatrix);
10086
originHash = hasher((originHash ^ originMemberHash) << 1);
10087
tmpHash = hasher((tmpHash ^ tmpMemberHash) << 1);
10088
}
10089
const TTypeList* originStruct = originType->getStruct();
10090
const TTypeList* tmpStruct = tmpType->getStruct();
10091
if (originHash != tmpHash) {
10092
auto fixRecords = record.find(originStruct);
10093
if (fixRecords != record.end()) {
10094
auto fixRecord = fixRecords->second.find(tmpHash);
10095
if (fixRecord != fixRecords->second.end()) {
10096
return fixRecord->second;
10097
} else {
10098
record[originStruct][tmpHash] = tmpStruct;
10099
return tmpStruct;
10100
}
10101
} else {
10102
record[originStruct] = std::map<size_t, const TTypeList*>();
10103
record[originStruct][tmpHash] = tmpStruct;
10104
return tmpStruct;
10105
}
10106
}
10107
return originStruct;
10108
}
10109
10110
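// Map a legacy size-based image format layout qualifier (e.g. size1x32, size4x32) to a
// typed format chosen by the image's basic type (float/int/uint); returns ElfNone when
// there is no mapping. For example, a size4x32 qualifier on a uint image maps to rgba32ui.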
TLayoutFormat TParseContext::mapLegacyLayoutFormat(TLayoutFormat legacyLayoutFormat, TBasicType imageType)
10111
{
10112
TLayoutFormat layoutFormat = ElfNone;
10113
if (imageType == EbtFloat) {
10114
switch (legacyLayoutFormat) {
10115
case ElfSize1x16: layoutFormat = ElfR16f; break;
10116
case ElfSize1x32: layoutFormat = ElfR32f; break;
10117
case ElfSize2x32: layoutFormat = ElfRg32f; break;
10118
case ElfSize4x32: layoutFormat = ElfRgba32f; break;
10119
default: break;
10120
}
10121
} else if (imageType == EbtUint) {
10122
switch (legacyLayoutFormat) {
10123
case ElfSize1x8: layoutFormat = ElfR8ui; break;
10124
case ElfSize1x16: layoutFormat = ElfR16ui; break;
10125
case ElfSize1x32: layoutFormat = ElfR32ui; break;
10126
case ElfSize2x32: layoutFormat = ElfRg32ui; break;
10127
case ElfSize4x32: layoutFormat = ElfRgba32ui; break;
10128
default: break;
10129
}
10130
} else if (imageType == EbtInt) {
10131
switch (legacyLayoutFormat) {
10132
case ElfSize1x8: layoutFormat = ElfR8i; break;
10133
case ElfSize1x16: layoutFormat = ElfR16i; break;
10134
case ElfSize1x32: layoutFormat = ElfR32i; break;
10135
case ElfSize2x32: layoutFormat = ElfRg32i; break;
10136
case ElfSize4x32: layoutFormat = ElfRgba32i; break;
10137
default: break;
10138
}
10139
}
10140
10141
return layoutFormat;
10142
}
10143
10144
} // end namespace glslang
10145
10146