//
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

//
// Do link-time merging and validation of intermediate representations.
//
// Basic model is that during compilation, each compilation unit (shader) is
// compiled into one TIntermediate instance.  Then, at link time, multiple
// units for the same stage can be merged together, which can generate errors.
// Then, after all merging, a single instance of TIntermediate represents
// the whole stage.  A final error check can be done on the resulting stage,
// even if no merging was done (i.e., the stage was only one compilation unit).
//

#include "localintermediate.h"
#include "../Include/InfoSink.h"

namespace glslang {

//
// Link-time error emitter.
//
void TIntermediate::error(TInfoSink& infoSink, const char* message)
{
    infoSink.info.prefix(EPrefixError);
    infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";

    ++numErrors;
}

// Link-time warning.
void TIntermediate::warn(TInfoSink& infoSink, const char* message)
{
    infoSink.info.prefix(EPrefixWarning);
    infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
}

// TODO: 4.4 offset/align:  "Two blocks linked together in the same program with the same block
// name must have the exact same set of members qualified with offset and their integral-constant
// expression values must be the same, or a link-time error results."
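//
// For illustration (hypothetical GLSL, not part of this codebase), two such
// blocks can only link if their offsets agree member-for-member:
//
//     layout(std140, binding = 0) uniform B { layout(offset = 16) float f; };  // unit 1
//     layout(std140, binding = 0) uniform B { layout(offset = 16) float f; };  // unit 2: offset must match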

//
// Merge the information from 'unit' into 'this'
//
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
    mergeCallGraphs(infoSink, unit);
    mergeModes(infoSink, unit);
    mergeTrees(infoSink, unit);
}
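
// A rough sketch of the intended call flow (illustrative only; in practice a
// higher-level program/linker object drives these steps):
//
//     TIntermediate& stage = ...;        // the stage's first compilation unit
//     stage.merge(infoSink, unit2);      // merge in each additional unit
//     stage.merge(infoSink, unit3);
//     stage.finalCheck(infoSink, false); // then do whole-stage error checking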

void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
{
    if (unit.getNumEntryPoints() > 0) {
        if (getNumEntryPoints() > 0)
            error(infoSink, "can't handle multiple entry points per stage");
        else {
            entryPointName = unit.getEntryPointName();
            entryPointMangledName = unit.getEntryPointMangledName();
        }
    }
    numEntryPoints += unit.getNumEntryPoints();

    callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
}

#define MERGE_MAX(member) member = std::max(member, unit.member)
#define MERGE_TRUE(member) if (unit.member) member = unit.member;
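
// For example, MERGE_MAX(spvVersion.spv) expands to
//     spvVersion.spv = std::max(spvVersion.spv, unit.spvVersion.spv);
// while MERGE_TRUE(earlyFragmentTests) adopts the unit's value only when that
// value is set (true), never clearing a value already set in 'this'.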

void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
{
    if (language != unit.language)
        error(infoSink, "stages must match when linking into a single stage");

    if (source == EShSourceNone)
        source = unit.source;
    if (source != unit.source)
        error(infoSink, "can't link compilation units from different source languages");

    if (treeRoot == nullptr) {
        profile = unit.profile;
        version = unit.version;
        requestedExtensions = unit.requestedExtensions;
    } else {
        if ((profile == EEsProfile) != (unit.profile == EEsProfile))
            error(infoSink, "Cannot cross link ES and desktop profiles");
        else if (unit.profile == ECompatibilityProfile)
            profile = ECompatibilityProfile;
        version = std::max(version, unit.version);
        requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
    }

    MERGE_MAX(spvVersion.spv);
    MERGE_MAX(spvVersion.vulkanGlsl);
    MERGE_MAX(spvVersion.vulkan);
    MERGE_MAX(spvVersion.openGl);

    numErrors += unit.getNumErrors();
    numPushConstants += unit.numPushConstants;

    if (unit.invocations != TQualifier::layoutNotSet) {
        if (invocations == TQualifier::layoutNotSet)
            invocations = unit.invocations;
        else if (invocations != unit.invocations)
            error(infoSink, "number of invocations must match between compilation units");
    }

    if (vertices == TQualifier::layoutNotSet)
        vertices = unit.vertices;
    else if (vertices != unit.vertices) {
        if (language == EShLangGeometry
#ifdef NV_EXTENSIONS
            || language == EShLangMeshNV
#endif
            )
            error(infoSink, "Contradictory layout max_vertices values");
        else if (language == EShLangTessControl)
            error(infoSink, "Contradictory layout vertices values");
        else
            assert(0);
    }
#ifdef NV_EXTENSIONS
    if (primitives == TQualifier::layoutNotSet)
        primitives = unit.primitives;
    else if (primitives != unit.primitives) {
        if (language == EShLangMeshNV)
            error(infoSink, "Contradictory layout max_primitives values");
        else
            assert(0);
    }
#endif

    if (inputPrimitive == ElgNone)
        inputPrimitive = unit.inputPrimitive;
    else if (inputPrimitive != unit.inputPrimitive)
        error(infoSink, "Contradictory input layout primitives");

    if (outputPrimitive == ElgNone)
        outputPrimitive = unit.outputPrimitive;
    else if (outputPrimitive != unit.outputPrimitive)
        error(infoSink, "Contradictory output layout primitives");

    if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
        error(infoSink, "gl_FragCoord redeclarations must match across shaders");

    if (vertexSpacing == EvsNone)
        vertexSpacing = unit.vertexSpacing;
    else if (vertexSpacing != unit.vertexSpacing)
        error(infoSink, "Contradictory input vertex spacing");

    if (vertexOrder == EvoNone)
        vertexOrder = unit.vertexOrder;
    else if (vertexOrder != unit.vertexOrder)
        error(infoSink, "Contradictory triangle ordering");

    MERGE_TRUE(pointMode);

    for (int i = 0; i < 3; ++i) {
        // Treat the defaults (1, and layoutNotSet) as "not specified": adopt the
        // unit's setting when ours is absent, and require agreement when both
        // are specified.
        if (unit.localSize[i] > 1) {
            if (localSize[i] == 1)
                localSize[i] = unit.localSize[i];
            else if (localSize[i] != unit.localSize[i])
                error(infoSink, "Contradictory local size");
        }

        if (unit.localSizeSpecId[i] != TQualifier::layoutNotSet) {
            if (localSizeSpecId[i] == TQualifier::layoutNotSet)
                localSizeSpecId[i] = unit.localSizeSpecId[i];
            else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
                error(infoSink, "Contradictory local size specialization ids");
        }
    }

    MERGE_TRUE(earlyFragmentTests);
    MERGE_TRUE(postDepthCoverage);

    if (depthLayout == EldNone)
        depthLayout = unit.depthLayout;
    else if (depthLayout != unit.depthLayout)
        error(infoSink, "Contradictory depth layouts");

    MERGE_TRUE(depthReplacing);
    MERGE_TRUE(hlslFunctionality1);

    blendEquations |= unit.blendEquations;

    MERGE_TRUE(xfbMode);

    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
        else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
            error(infoSink, "Contradictory xfb_stride");
        xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
        if (unit.xfbBuffers[b].contains64BitType)
            xfbBuffers[b].contains64BitType = true;
#ifdef AMD_EXTENSIONS
        if (unit.xfbBuffers[b].contains32BitType)
            xfbBuffers[b].contains32BitType = true;
        if (unit.xfbBuffers[b].contains16BitType)
            xfbBuffers[b].contains16BitType = true;
#endif
        // TODO: 4.4 link: enhanced layouts: compare ranges
    }

    MERGE_TRUE(multiStream);

#ifdef NV_EXTENSIONS
    MERGE_TRUE(layoutOverrideCoverage);
    MERGE_TRUE(geoPassthroughEXT);
#endif

    for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
        if (unit.shiftBinding[i] > 0)
            setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
    }

    for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
        for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
            setShiftBindingForSet((TResourceType)i, it->second, it->first);
    }

    resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());

    MERGE_TRUE(autoMapBindings);
    MERGE_TRUE(autoMapLocations);
    MERGE_TRUE(invertY);
    MERGE_TRUE(flattenUniformArrays);
    MERGE_TRUE(useUnknownFormat);
    MERGE_TRUE(hlslOffsets);
    MERGE_TRUE(useStorageBuffer);
    MERGE_TRUE(hlslIoMapping);

    // TODO: sourceFile
    // TODO: sourceText
    // TODO: processes

    MERGE_TRUE(needToLegalize);
    MERGE_TRUE(binaryDoubleOutput);
    MERGE_TRUE(usePhysicalStorageBuffer);
}

//
// Merge the 'unit' AST into 'this' AST.
// That includes rationalizing the unique IDs, which were set up independently,
// and might have overlaps that are not the same symbol, or might have different
// IDs for what should be the same shared symbol.
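//
// For example (hypothetical IDs): if both units declare "uniform float u", but
// it received ID 7 in 'this' tree and ID 12 in 'unit', the unit's occurrences
// are remapped to 7; conversely, an unrelated unit-local symbol that happened
// to also receive ID 7 is shifted to a new ID above this tree's maximum.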
//
void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
{
    if (unit.treeRoot == nullptr)
        return;

    if (treeRoot == nullptr) {
        treeRoot = unit.treeRoot;
        return;
    }

    // Getting this far means we have two existing trees to merge...
#ifdef NV_EXTENSIONS
    numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
#endif

#ifdef NV_EXTENSIONS
    numTaskNVBlocks += unit.numTaskNVBlocks;
#endif

    // Get the top-level globals of each unit
    TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
    TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();

    // Get the linker-object lists
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // Map by global name to unique ID to rationalize the same object having
    // differing IDs in different trees.
    TMap<TString, int> idMap;
    int maxId;
    seedIdMap(idMap, maxId);
    remapIds(idMap, maxId + 1, unit);

    mergeBodies(infoSink, globals, unitGlobals);
    mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects);
    ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
}

// Traverser that seeds an ID map with all built-ins, and tracks the
// maximum ID used.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TBuiltInIdTraverser : public TIntermTraverser {
public:
    TBuiltInIdTraverser(TMap<TString, int>& idMap) : idMap(idMap), maxId(0) { }
    // If it's a built in, add it to the map.
    // Track the max ID.
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn != EbvNone)
            idMap[symbol->getName()] = symbol->getId();
        maxId = std::max(maxId, symbol->getId());
    }
    int getMaxId() const { return maxId; }
protected:
    TBuiltInIdTraverser(TBuiltInIdTraverser&);
    TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
    TMap<TString, int>& idMap;
    int maxId;
};

// Traverser that seeds an ID map with non-builtins.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TUserIdTraverser : public TIntermTraverser {
public:
    TUserIdTraverser(TMap<TString, int>& idMap) : idMap(idMap) { }
    // If it's a non-built-in global, add it to the map.
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn == EbvNone)
            idMap[symbol->getName()] = symbol->getId();
    }

protected:
    TUserIdTraverser(TUserIdTraverser&);
    TUserIdTraverser& operator=(TUserIdTraverser&);
    TMap<TString, int>& idMap; // over biggest id
};

// Initialize the ID map with what we know of 'this' AST.
void TIntermediate::seedIdMap(TMap<TString, int>& idMap, int& maxId)
{
    // all built-ins everywhere need to align on IDs and contribute to the max ID
    TBuiltInIdTraverser builtInIdTraverser(idMap);
    treeRoot->traverse(&builtInIdTraverser);
    maxId = builtInIdTraverser.getMaxId();

    // user variables in the linker object list need to align on ids
    TUserIdTraverser userIdTraverser(idMap);
    findLinkerObjects()->traverse(&userIdTraverser);
}

// Traverser to map an AST ID to what was known from the seeding AST.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TRemapIdTraverser : public TIntermTraverser {
public:
    TRemapIdTraverser(const TMap<TString, int>& idMap, int idShift) : idMap(idMap), idShift(idShift) { }
    // Do the mapping:
    //  - if the same symbol, adopt the 'this' ID
    //  - otherwise, ensure a unique ID by shifting to a new space
    virtual void visitSymbol(TIntermSymbol* symbol)
    {
        const TQualifier& qualifier = symbol->getType().getQualifier();
        bool remapped = false;
        if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
            auto it = idMap.find(symbol->getName());
            if (it != idMap.end()) {
                symbol->changeId(it->second);
                remapped = true;
            }
        }
        if (!remapped)
            symbol->changeId(symbol->getId() + idShift);
    }
protected:
    TRemapIdTraverser(TRemapIdTraverser&);
    TRemapIdTraverser& operator=(TRemapIdTraverser&);
    const TMap<TString, int>& idMap;
    int idShift;
};

void TIntermediate::remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate& unit)
{
    // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
    TRemapIdTraverser idTraverser(idMap, idShift);
    unit.getTreeRoot()->traverse(&idTraverser);
}

//
// Merge the function bodies and global-level initializers from unitGlobals into globals.
// Will error check duplication of function bodies for the same signature.
//
void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
{
    // TODO: link-time performance: Processing in alphabetical order will be faster

    // Error check the global objects, not including the linker objects
    for (unsigned int child = 0; child < globals.size() - 1; ++child) {
        for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
            TIntermAggregate* body = globals[child]->getAsAggregate();
            TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
            if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
                error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
                infoSink.info << "    " << globals[child]->getAsAggregate()->getName() << "\n";
            }
        }
    }

    // Merge the global objects, just in front of the linker objects
    globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
}

//
// Merge the linker objects from unitLinkerObjects into linkerObjects.
// Duplication is expected and filtered out, but contradictions are an error.
//
void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects)
{
    // Error check and merge the linker objects (duplicates should not be created)
    std::size_t initialNumLinkerObjects = linkerObjects.size();
    for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
        bool merge = true;
        for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
            TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
            TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
            assert(symbol && unitSymbol);
            if (symbol->getName() == unitSymbol->getName()) {
                // filter out copy
                merge = false;

                // but if one has an initializer and the other does not, update
                // the initializer
                if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
                    symbol->setConstArray(unitSymbol->getConstArray());

                // Similarly for binding
                if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
                    symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;

                // Update implicit array sizes
                mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());

                // Check for consistent types/qualification/initializers etc.
                mergeErrorCheck(infoSink, *symbol, *unitSymbol, false);
            }
        }
        if (merge)
            linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);
    }
}

// TODO 4.5 link functionality: cull distance array size checking

// Recursively merge the implicit array sizes through the objects' respective type trees.
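// For example (hypothetical GLSL): if 'this' declares "float a[];", implicitly
// sized by an access to a[4], and the unit declares "float a[7];", the merged
// object becomes an explicitly sized float[7].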
void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
{
    if (type.isUnsizedArray()) {
        if (unitType.isUnsizedArray()) {
            type.updateImplicitArraySize(unitType.getImplicitArraySize());
            if (unitType.isArrayVariablyIndexed())
                type.setArrayVariablyIndexed();
        } else if (unitType.isSizedArray())
            type.changeOuterArraySize(unitType.getOuterArraySize());
    }

    // Type mismatches are caught and reported after this, just be careful for now.
    if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
        return;

    for (int i = 0; i < (int)type.getStruct()->size(); ++i)
        mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
}

//
// Compare two global objects from two compilation units and see if they match
// well enough.  Rules can be different for intra- vs. cross-stage matching.
//
// This function only does one of intra- or cross-stage matching per call.
//
void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
{
    bool writeTypeComparison = false;

    // Types have to match
    if (symbol.getType() != unitSymbol.getType()) {
        // but, we make an exception if one is an implicit array and the other is sized
        if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
                symbol.getType().sameElementType(unitSymbol.getType()) &&
                (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) {
            error(infoSink, "Types must match:");
            writeTypeComparison = true;
        }
    }

    // Qualifiers have to (almost) match

    // Storage...
    if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
        error(infoSink, "Storage qualifiers must match:");
        writeTypeComparison = true;
    }

    // Precision...
    if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
        error(infoSink, "Precision qualifiers must match:");
        writeTypeComparison = true;
    }

    // Invariance...
    if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
        error(infoSink, "Presence of invariant qualifier must match:");
        writeTypeComparison = true;
    }

    // Precise...
    if (! crossStage && symbol.getQualifier().noContraction != unitSymbol.getQualifier().noContraction) {
        error(infoSink, "Presence of precise qualifier must match:");
        writeTypeComparison = true;
    }

    // Auxiliary and interpolation...
    if (symbol.getQualifier().centroid  != unitSymbol.getQualifier().centroid ||
        symbol.getQualifier().smooth    != unitSymbol.getQualifier().smooth ||
        symbol.getQualifier().flat      != unitSymbol.getQualifier().flat ||
        symbol.getQualifier().sample    != unitSymbol.getQualifier().sample ||
        symbol.getQualifier().patch     != unitSymbol.getQualifier().patch ||
        symbol.getQualifier().nopersp   != unitSymbol.getQualifier().nopersp) {
        error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
        writeTypeComparison = true;
    }

    // Memory...
    if (symbol.getQualifier().coherent          != unitSymbol.getQualifier().coherent ||
        symbol.getQualifier().devicecoherent    != unitSymbol.getQualifier().devicecoherent ||
        symbol.getQualifier().queuefamilycoherent  != unitSymbol.getQualifier().queuefamilycoherent ||
        symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
        symbol.getQualifier().subgroupcoherent  != unitSymbol.getQualifier().subgroupcoherent ||
        symbol.getQualifier().nonprivate        != unitSymbol.getQualifier().nonprivate ||
        symbol.getQualifier().volatil           != unitSymbol.getQualifier().volatil ||
        symbol.getQualifier().restrict          != unitSymbol.getQualifier().restrict ||
        symbol.getQualifier().readonly          != unitSymbol.getQualifier().readonly ||
        symbol.getQualifier().writeonly         != unitSymbol.getQualifier().writeonly) {
        error(infoSink, "Memory qualifiers must match:");
        writeTypeComparison = true;
    }

    // Layouts...
    // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
    //       requires separate user-supplied offset from actual computed offset, but
    //       current implementation only has one offset.
    if (symbol.getQualifier().layoutMatrix    != unitSymbol.getQualifier().layoutMatrix ||
        symbol.getQualifier().layoutPacking   != unitSymbol.getQualifier().layoutPacking ||
        symbol.getQualifier().layoutLocation  != unitSymbol.getQualifier().layoutLocation ||
        symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent ||
        symbol.getQualifier().layoutIndex     != unitSymbol.getQualifier().layoutIndex ||
        symbol.getQualifier().layoutBinding   != unitSymbol.getQualifier().layoutBinding ||
        (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) {
        error(infoSink, "Layout qualification must match:");
        writeTypeComparison = true;
    }

    // Initializers have to match, if both are present, and if we don't already know the types don't match
    if (! writeTypeComparison) {
        if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
            if (symbol.getConstArray() != unitSymbol.getConstArray()) {
                error(infoSink, "Initializers must match:");
                infoSink.info << "    " << symbol.getName() << "\n";
            }
        }
    }

    if (writeTypeComparison)
        infoSink.info << "    " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" <<
                                                             unitSymbol.getType().getCompleteString() << "\"\n";
}

//
// Do final link-time error checking of a complete (merged) intermediate representation.
// (Much error checking was done during merging).
//
// Also, lock in defaults of things not set, including array sizes.
//
void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
{
    if (getTreeRoot() == nullptr)
        return;

    if (numEntryPoints < 1) {
        if (source == EShSourceGlsl)
            error(infoSink, "Missing entry point: Each stage requires one entry point");
        else
            warn(infoSink, "Entry point not found");
    }

    if (numPushConstants > 1)
        error(infoSink, "Only one push_constant block is allowed per stage");

    // recursion and missing body checking
    checkCallGraphCycles(infoSink);
    checkCallGraphBodies(infoSink, keepUncalled);

    // overlap/alias/missing I/O, etc.
    inOutLocationCheck(infoSink);

    // invocations
    if (invocations == TQualifier::layoutNotSet)
        invocations = 1;

    if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
    if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_CullDistance is preferred)");

    if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
        error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
    if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
        error(infoSink, "Cannot use both gl_FragColor and gl_FragData");

    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        if (xfbBuffers[b].contains64BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 8);
#ifdef AMD_EXTENSIONS
        else if (xfbBuffers[b].contains32BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 4);
        else if (xfbBuffers[b].contains16BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 2);
#endif

        // "It is a compile-time or link-time error to have
        // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
        // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
        // compile-time or link-time error to have different values specified for the stride for the same buffer."
        if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
            error(infoSink, "xfb_stride is too small to hold all buffer entries:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
        }
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = xfbBuffers[b].implicitStride;

        // "If the buffer is capturing any
        // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
        // multiple of 4, or a compile-time or link-time error results."
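        // For example (hypothetical layout): a buffer capturing one double (8 bytes)
        // and one float (4 bytes) has an implicit stride of 12; because it contains
        // a 64-bit type, it was rounded up above to 16, a multiple of 8 as required.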
        if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
            error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
#ifdef AMD_EXTENSIONS
        } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
#else
        } else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
#endif
            error(infoSink, "xfb_stride must be multiple of 4:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }
#ifdef AMD_EXTENSIONS
        // "If the buffer is capturing any
        // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
        else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
            error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }

#endif
        // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
        // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
        if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
            error(infoSink, "xfb_stride is too large:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n";
        }
    }

    switch (language) {
    case EShLangVertex:
        break;
    case EShLangTessControl:
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify an output layout(vertices=...)");
        break;
    case EShLangTessEvaluation:
        if (source == EShSourceGlsl) {
            if (inputPrimitive == ElgNone)
                error(infoSink, "At least one shader must specify an input layout primitive");
            if (vertexSpacing == EvsNone)
                vertexSpacing = EvsEqual;
            if (vertexOrder == EvoNone)
                vertexOrder = EvoCcw;
        }
        break;
    case EShLangGeometry:
        if (inputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an input layout primitive");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        break;
    case EShLangFragment:
        // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
        // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
        // requiring explicit early_fragment_tests
        if (getPostDepthCoverage() && !getEarlyFragmentTests())
            error(infoSink, "post_depth_coverage requires early_fragment_tests");
        break;
    case EShLangCompute:
        break;

#ifdef NV_EXTENSIONS
    case EShLangRayGenNV:
    case EShLangIntersectNV:
    case EShLangAnyHitNV:
    case EShLangClosestHitNV:
    case EShLangMissNV:
    case EShLangCallableNV:
        if (numShaderRecordNVBlocks > 1)
            error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
        break;
    case EShLangMeshNV:
        // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
        if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
            error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
        if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
        if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
        if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
            error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
        if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
            error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        if (primitives == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
        // fall through
    case EShLangTaskNV:
        if (numTaskNVBlocks > 1)
            error(infoSink, "Only one taskNV interface block is allowed per shader");
        break;
#endif

    default:
        error(infoSink, "Unknown Stage.");
        break;
    }

    // Process the tree for any node-specific work.
    class TFinalLinkTraverser : public TIntermTraverser {
    public:
        TFinalLinkTraverser() { }
        virtual ~TFinalLinkTraverser() { }

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            // Implicitly size arrays.
            // If an unsized array is left as unsized, it effectively
            // becomes run-time sized.
            symbol->getWritableType().adoptImplicitArraySizes(false);
        }
    } finalLinkTraverser;

    treeRoot->traverse(&finalLinkTraverser);
}

//
// See if the call graph contains any static recursion, which is disallowed
// by the specification.
//
void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->currentPath = false;
        call->errorGiven = false;
    }

    //
    // Loop, looking for a new connected subgraph.  One subgraph is handled per loop iteration.
    //

    TCall* newRoot;
    do {
        // See if we have unvisited parts of the graph.
        newRoot = 0;
        for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
            if (! call->visited) {
                newRoot = &(*call);
                break;
            }
        }

        // If not, we are done.
        if (! newRoot)
            break;

        // Otherwise, we found a new subgraph, process it:
        // See what all can be reached by this new root, and if any of
        // that is recursive.  This is done by depth-first traversals, seeing
        // if a new call is found that was already in the currentPath (a back edge),
        // thereby detecting recursion.
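        // For example (hypothetical GLSL), "void a() { b(); } void b() { a(); }"
        // pushes the edge a->b, then b->a, and then finds a->b already on the
        // current path (a back edge), so the cycle is reported as recursion.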
        std::list<TCall*> stack;
        newRoot->currentPath = true; // currentPath will be true iff it is on the stack
        stack.push_back(newRoot);
        while (! stack.empty()) {
            // get a caller
            TCall* call = stack.back();

            // Add to the stack just one callee.
            // This algorithm always terminates, because only !visited and !currentPath causes a push
            // and all pushes change currentPath to true, and all pops change visited to true.
            TGraph::iterator child = callGraph.begin();
            for (; child != callGraph.end(); ++child) {

                // If we already visited this node, its whole subgraph has already been processed, so skip it.
                if (child->visited)
                    continue;

                if (call->callee == child->caller) {
                    if (child->currentPath) {
                        // Then, we found a back edge
                        if (! child->errorGiven) {
                            error(infoSink, "Recursion detected:");
                            infoSink.info << "    " << call->callee << " calling " << child->callee << "\n";
                            child->errorGiven = true;
                            recursive = true;
                        }
                    } else {
                        child->currentPath = true;
                        stack.push_back(&(*child));
                        break;
                    }
                }
            }
            if (child == callGraph.end()) {
                // no more callees, we bottomed out, never look at this node again
                stack.back()->currentPath = false;
                stack.back()->visited = true;
                stack.pop_back();
            }
        }  // end while, meaning nothing left to process in this subtree

    } while (newRoot);  // redundant loop check; should always exit via the 'break' above
}

//
// See which functions are reachable from the entry point and which have bodies.
// Reachable ones with missing bodies are errors.
// Unreachable bodies are dead code.
//
void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->calleeBodyPosition = -1;
    }

    // The top level of the AST includes function definitions (bodies).
    // Compare these to function calls in the call graph.
    // We'll end up knowing which have bodies, and if so,
    // how to map the call-graph node to the location in the AST.
    TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
    std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
    for (int f = 0; f < (int)functionSequence.size(); ++f) {
        glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
        if (node && (node->getOp() == glslang::EOpFunction)) {
            if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
                reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
            for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
                if (call->callee == node->getName())
                    call->calleeBodyPosition = f;
            }
        }
    }

    // Start call-graph traversal by visiting the entry point nodes.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
            call->visited = true;
    }

    // Propagate 'visited' through the call-graph to every part of the graph it
    // can reach (seeded with the entry-point setting above).
    bool changed;
    do {
        changed = false;
        for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
            if (call1->visited) {
                for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
                    if (! call2->visited) {
                        if (call1->callee == call2->caller) {
                            changed = true;
                            call2->visited = true;
                        }
                    }
                }
            }
        }
    } while (changed);

    // Any call-graph node set to visited but without a callee body is an error.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->visited) {
            if (call->calleeBodyPosition == -1) {
                error(infoSink, "No function definition (body) found: ");
                infoSink.info << "    " << call->callee << "\n";
            } else
                reachable[call->calleeBodyPosition] = true;
        }
    }

    // Bodies in the AST not reached by the call graph are dead;
    // clear them out, since they can't be reached and also can't
    // be translated further due to possibility of being ill defined.
    if (! keepUncalled) {
        for (int f = 0; f < (int)functionSequence.size(); ++f) {
            if (! reachable[f])
                functionSequence[f] = nullptr;
        }
        functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
    }
}

//
// Satisfy rules for location qualifiers on inputs and outputs
//
void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
{
    // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
    bool fragOutWithNoLocation = false;
    int numFragOut = 0;

    // TODO: linker functionality: location collision checking

    TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
    for (size_t i = 0; i < linkObjects.size(); ++i) {
        const TType& type = linkObjects[i]->getAsTyped()->getType();
        const TQualifier& qualifier = type.getQualifier();
        if (language == EShLangFragment) {
            if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
                ++numFragOut;
                if (!qualifier.hasAnyLocation())
                    fragOutWithNoLocation = true;
            }
        }
    }

    if (profile == EEsProfile) {
        if (numFragOut > 1 && fragOutWithNoLocation)
            error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
    }
}

TIntermAggregate* TIntermediate::findLinkerObjects() const
{
    // Get the top-level globals
    TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();

    // Get the last member of the sequences, expected to be the linker-object lists
    assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);

    return globals.back()->getAsAggregate();
}

// See if a variable was both a user-declared output and used.
// Note: the spec discusses writing to one, but this looks at read or write, which
// is more useful, and perhaps the spec should be changed to reflect that.
bool TIntermediate::userOutputUsed() const
{
    const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();

    bool found = false;
    for (size_t i = 0; i < linkerObjects.size(); ++i) {
        const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
        if (symbolNode.getQualifier().storage == EvqVaryingOut &&
            symbolNode.getName().compare(0, 3, "gl_") != 0 &&
            inIoAccessed(symbolNode.getName())) {
            found = true;
            break;
        }
    }

    return found;
}

// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions
// as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
// typeCollision is set to true if there is no direct collision, but the types in the same location
// are different.
//
int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
{
    typeCollision = false;

    int set;
    if (qualifier.isPipeInput())
        set = 0;
    else if (qualifier.isPipeOutput())
        set = 1;
    else if (qualifier.storage == EvqUniform)
        set = 2;
    else if (qualifier.storage == EvqBuffer)
        set = 3;
    else
        return -1;

    int size;
    if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
        if (type.isSizedArray())
            size = type.getCumulativeArraySize();
        else
            size = 1;
    } else {
        // Strip off the outer array dimension for those having an extra one.
        if (type.isArray() && qualifier.isArrayedIo(language)) {
            TType elementType(type, 0);
            size = computeTypeLocationSize(elementType, language);
        } else
            size = computeTypeLocationSize(type, language);
    }

    // Locations, and components within locations.
    //
    // Almost always, dealing with components means a single location is involved.
    // The exception is a dvec3. From the spec:
    //
    // "A dvec3 will consume all four components of the first location and components 0 and 1 of
    // the second location. This leaves components 2 and 3 available for other component-qualified
    // declarations."
    //
    // That means, without ever mentioning a component, a component range
    // for a different location gets specified, if it's not a vertex shader input. (!)
    // (A vertex shader input will show using only one location, even for a dvec3/4.)
    //
    // So, for the case of dvec3, we need two independent ioRanges.
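    // For example (hypothetical GLSL), "layout(location = 4) in dvec3 v;" in a
    // fragment shader occupies all of location 4 (components 0..3) plus
    // components 0..1 of location 5, so two TIoRanges are recorded below.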

    int collision = -1; // no collision
    if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
        (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
        // Dealing with dvec3 in/out split across two locations.
        // Need two io-ranges.
        // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.

        // First range:
        TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
        TRange componentRange(0, 3);
        TIoRange range(locationRange, componentRange, type.getBasicType(), 0);

        // check for collisions
        collision = checkLocationRange(set, range, type, typeCollision);
        if (collision < 0) {
            usedIo[set].push_back(range);

            // Second range:
            TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
            TRange componentRange2(0, 1);
            TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0);

            // check for collisions
            collision = checkLocationRange(set, range2, type, typeCollision);
            if (collision < 0)
                usedIo[set].push_back(range2);
        }
    } else {
        // Not a dvec3 in/out split across two locations, generic path.
        // Need a single IO-range block.

        TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
        TRange componentRange(0, 3);
        if (qualifier.hasComponent() || type.getVectorSize() > 0) {
            int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
            if (qualifier.hasComponent())
                componentRange.start = qualifier.layoutComponent;
            componentRange.last  = componentRange.start + consumedComponents - 1;
        }

        // combine location and component ranges
        TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);

        // check for collisions, except for vertex inputs on desktop targeting OpenGL
        if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
            collision = checkLocationRange(set, range, type, typeCollision);

        if (collision < 0)
            usedIo[set].push_back(range);
    }

    return collision;
}

// Compare a new (the passed in) 'range' against the existing set, and see
// if there are any collisions.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
{
    for (size_t r = 0; r < usedIo[set].size(); ++r) {
        if (range.overlap(usedIo[set][r])) {
            // there is a collision; pick one
            return std::max(range.location.start, usedIo[set][r].location.start);
        } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
            // aliased-type mismatch
            typeCollision = true;
            return std::max(range.location.start, usedIo[set][r].location.start);
        }
    }

    return -1; // no collision
}

// Accumulate bindings and offsets, and check for collisions
// as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
{
    TRange bindingRange(binding, binding);
    TRange offsetRange(offset, offset + numOffsets - 1);
    TOffsetRange range(bindingRange, offsetRange);

    // check for collisions, except for vertex inputs on desktop
    for (size_t r = 0; r < usedAtomics.size(); ++r) {
        if (range.overlap(usedAtomics[r])) {
            // there is a collision; pick one
            return std::max(offset, usedAtomics[r].offset.start);
        }
    }

    usedAtomics.push_back(range);

    return -1; // no collision
}

// Accumulate used constant_id values.
//
// Return false if one was already used.
bool TIntermediate::addUsedConstantId(int id)
{
    if (usedConstantId.find(id) != usedConstantId.end())
        return false;

    usedConstantId.insert(id);

    return true;
}

// Recursively figure out how many locations are used up by an input or output type.
// Return the size of type, as measured by "locations".
int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
{
    // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
    // consecutive locations..."
    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        // TODO: are there valid cases of having an unsized array with a location?  If so, running this code too early.
        TType elementType(type, 0);
        if (type.isSizedArray()
#ifdef NV_EXTENSIONS
            && !type.getQualifier().isPerView()
#endif
            )
            return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
        else {
#ifdef NV_EXTENSIONS
            // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
            elementType.getQualifier().perViewNV = false;
#endif
            return computeTypeLocationSize(elementType, stage);
        }
    }

    // "The locations consumed by block and structure members are determined by applying the rules above
    // recursively..."
    if (type.isStruct()) {
        int size = 0;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            size += computeTypeLocationSize(memberType, stage);
        }
        return size;
    }

    // ES: "If a shader input is any scalar or vector type, it will consume a single location."

    // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
    // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
    // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
    // consume only a single location, in all stages."
1222     if (type.isScalar())
1223         return 1;
1224     if (type.isVector()) {
1225         if (stage == EShLangVertex && type.getQualifier().isPipeInput())
1226             return 1;
1227         if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
1228             return 2;
1229         else
1230             return 1;
1231     }
1232 
1233     // "If the declared input is an n x m single- or double-precision matrix, ...
1234     // The number of locations assigned for each matrix will be the same as
1235     // for an n-element array of m-component vectors..."
1236     if (type.isMatrix()) {
1237         TType columnType(type, 0);
1238         return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
1239     }
1240 
1241     assert(0);
1242     return 1;
1243 }

// Same as computeTypeLocationSize, but for uniform locations.
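// For example (illustrative): 'uniform float u[4]' takes 4 locations, while any
// single scalar, vector, or matrix takes 1; a struct takes the sum of its members.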
int TIntermediate::computeTypeUniformLocationSize(const TType& type)
{
    // "Individual elements of a uniform array are assigned
    // consecutive locations with the first element taking location
    // location."
    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType elementType(type, 0);
        if (type.isSizedArray()) {
            return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
        } else {
            // TODO: are there valid cases of having an implicitly-sized array with a location?  If so, this code is running too early.
            return computeTypeUniformLocationSize(elementType);
        }
    }

    // "Each subsequent inner-most member or element gets incremental
    // locations for the entire structure or array."
    if (type.isStruct()) {
        int size = 0;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            size += computeTypeUniformLocationSize(memberType);
        }
        return size;
    }

    return 1;
}

// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
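// For example (illustrative): on the same xfb_buffer, a vec4 at xfb_offset 0
// (bytes 0..15) and a float at xfb_offset 12 (bytes 12..15) overlap, and 12 is
// returned as the colliding offset.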
int TIntermediate::addXfbBufferOffset(const TType& type)
{
    const TQualifier& qualifier = type.getQualifier();

    assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
    TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];

    // compute the range
#ifdef AMD_EXTENSIONS
    unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
#else
    unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType);
#endif
    buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
    TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);

    // check for collisions
    for (size_t r = 0; r < buffer.ranges.size(); ++r) {
        if (range.overlap(buffer.ranges[r])) {
            // there is a collision; pick an example to return
            return std::max(range.start, buffer.ranges[r].start);
        }
    }

    buffer.ranges.push_back(range);

    return -1;  // no collision
}

// Recursively figure out how many bytes of xfb buffer are used by the given type.
// Return the size of type, in bytes.
// Sets contains64BitType to true if the type contains a 64-bit data type.
#ifdef AMD_EXTENSIONS
// Sets contains32BitType to true if the type contains a 32-bit data type.
// Sets contains16BitType to true if the type contains a 16-bit data type.
// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
#else
// N.B. Caller must set contains64BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType) const
#endif
{
    // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
    // and the space taken in the buffer will be a multiple of 8.
    // ...within the qualified entity, subsequent components are each
    // assigned, in order, to the next available offset aligned to a multiple of
    // that component's size.  Aggregate types are flattened down to the component
    // level to get this sequence of components."

    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        assert(type.isSizedArray());
        TType elementType(type, 0);
#ifdef AMD_EXTENSIONS
        return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
#else
        return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType);
#endif
    }

    if (type.isStruct()) {
        unsigned int size = 0;
        bool structContains64BitType = false;
#ifdef AMD_EXTENSIONS
        bool structContains32BitType = false;
        bool structContains16BitType = false;
#endif
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            // "... if applied to
            // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
            // and the space taken in the buffer will be a multiple of 8."
            bool memberContains64BitType = false;
#ifdef AMD_EXTENSIONS
            bool memberContains32BitType = false;
            bool memberContains16BitType = false;
            int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
#else
            int memberSize = computeTypeXfbSize(memberType, memberContains64BitType);
#endif
            if (memberContains64BitType) {
                structContains64BitType = true;
                RoundToPow2(size, 8);
#ifdef AMD_EXTENSIONS
            } else if (memberContains32BitType) {
                structContains32BitType = true;
                RoundToPow2(size, 4);
            } else if (memberContains16BitType) {
                structContains16BitType = true;
                RoundToPow2(size, 2);
#endif
            }
            size += memberSize;
        }

        if (structContains64BitType) {
            contains64BitType = true;
            RoundToPow2(size, 8);
#ifdef AMD_EXTENSIONS
        } else if (structContains32BitType) {
            contains32BitType = true;
            RoundToPow2(size, 4);
        } else if (structContains16BitType) {
            contains16BitType = true;
            RoundToPow2(size, 2);
#endif
        }
        return size;
    }

    int numComponents;
    if (type.isScalar())
        numComponents = 1;
    else if (type.isVector())
        numComponents = type.getVectorSize();
    else if (type.isMatrix())
        numComponents = type.getMatrixCols() * type.getMatrixRows();
    else {
        assert(0);
        numComponents = 1;
    }

    if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
        contains64BitType = true;
        return 8 * numComponents;
#ifdef AMD_EXTENSIONS
    } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
        contains16BitType = true;
        return 2 * numComponents;
    } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
        return numComponents;
    else {
        contains32BitType = true;
        return 4 * numComponents;
    }
#else
    } else
        return 4 * numComponents;
#endif
}
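
// For example (illustrative): capturing a 'struct { float f; double d; }'
// flattens to a 4-byte float at offset 0, rounds up to 8 for the double
// (bytes 8..15), and rounds the struct total up to a multiple of 8, giving
// 16 bytes with contains64BitType set.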

const int baseAlignmentVec4Std140 = 16;

// Return the size and alignment of a component of the given type.
// The size is returned in the 'size' parameter.
// Return value is the alignment.
int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
{
    switch (type.getBasicType()) {
    case EbtInt64:
    case EbtUint64:
    case EbtDouble:  size = 8; return 8;
    case EbtFloat16: size = 2; return 2;
    case EbtInt8:
    case EbtUint8:   size = 1; return 1;
    case EbtInt16:
    case EbtUint16:  size = 2; return 2;
    case EbtReference: size = 8; return 8;
    default:         size = 4; return 4;
    }
}

// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout.
// Operates recursively.
//
// If std140 is true, it does the rounding up to vec4 size required by std140,
// otherwise it does not, yielding std430 rules.
//
// The size is returned in the 'size' parameter.
//
// The stride is only non-0 for arrays or matrices, and is the stride of the
// top-level object nested within the type.  E.g., for an array of matrices,
// it is the distance needed between matrices, despite the rules saying the
// stride comes from the flattening down to vectors.
//
// Return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    int alignment;

    bool std140 = layoutPacking == glslang::ElpStd140;
    // When using the std140 storage layout, structures will be laid out in buffer
    // storage with their members stored in monotonically increasing order based on their
    // location in the declaration. A structure and each structure member have a base
    // offset and a base alignment, from which an aligned offset is computed by rounding
    // the base offset up to a multiple of the base alignment. The base offset of the first
    // member of a structure is taken from the aligned offset of the structure itself. The
    // base offset of all other structure members is derived by taking the offset of the
    // last basic machine unit consumed by the previous member and adding one. Each
    // structure member is stored in memory at its aligned offset. The members of a top-
    // level uniform block are laid out in buffer storage by treating the uniform block as
    // a structure with a base offset of zero.
    //
    //   1. If the member is a scalar consuming N basic machine units, the base alignment is N.
    //
    //   2. If the member is a two- or four-component vector with components consuming N basic
    //      machine units, the base alignment is 2N or 4N, respectively.
    //
    //   3. If the member is a three-component vector with components consuming N
    //      basic machine units, the base alignment is 4N.
    //
    //   4. If the member is an array of scalars or vectors, the base alignment and array
    //      stride are set to match the base alignment of a single array element, according
    //      to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
    //      array may have padding at the end; the base offset of the member following
    //      the array is rounded up to the next multiple of the base alignment.
    //
    //   5. If the member is a column-major matrix with C columns and R rows, the
    //      matrix is stored identically to an array of C column vectors with R
    //      components each, according to rule (4).
    //
    //   6. If the member is an array of S column-major matrices with C columns and
    //      R rows, the matrix is stored identically to a row of S × C column vectors
    //      with R components each, according to rule (4).
    //
    //   7. If the member is a row-major matrix with C columns and R rows, the matrix
    //      is stored identically to an array of R row vectors with C components each,
    //      according to rule (4).
    //
    //   8. If the member is an array of S row-major matrices with C columns and R
    //      rows, the matrix is stored identically to a row of S × R row vectors with C
    //      components each, according to rule (4).
    //
    //   9. If the member is a structure, the base alignment of the structure is N, where
    //      N is the largest base alignment value of any of its members, and rounded
    //      up to the base alignment of a vec4. The individual members of this substructure
    //      are then assigned offsets by applying this set of rules recursively,
    //      where the base offset of the first member of the sub-structure is equal to the
    //      aligned offset of the structure. The structure may have padding at the end;
    //      the base offset of the member following the sub-structure is rounded up to
    //      the next multiple of the base alignment of the structure.
    //
    //   10. If the member is an array of S structures, the S elements of the array are laid
    //       out in order, according to rule (9).
    //
    //   Assumption for rule 10: the stride is the same as the size of an element.
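    //
    //   For example (illustrative), under std140 a 'struct { float f; vec3 v; }'
    //   lays out as: 'f' at offset 0 (alignment 4), 'v' at offset 16 (vec3
    //   alignment is 4N = 16), giving a raw size of 28, a structure alignment
    //   of 16, and a final size rounded up to 32.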

    stride = 0;
    int dummyStride;

    // rules 4, 6, 8, and 10
    if (type.isArray()) {
        // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType derefType(type, 0);
        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // uses full matrix size for stride of an array of matrices (not quite what rules 6/8 say, but what's expected)
                        // uses the assumption for rule 10 in the comment above
        size = stride * type.getOuterArraySize();
        return alignment;
    }

    // rule 9
    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
                                                   (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        // The structure may have padding at the end; the base offset of
        // the member following the sub-structure is rounded up to the next
        // multiple of the base alignment of the structure.
        RoundToPow2(size, maxAlignment);

        return maxAlignment;
    }

    // rule 1
    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    // rules 2 and 3
    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);
        switch (type.getVectorSize()) {
        case 1: // HLSL has this, GLSL does not
            return scalarAlign;
        case 2:
            size *= 2;
            return 2 * scalarAlign;
        default:
            size *= type.getVectorSize();
            return 4 * scalarAlign;
        }
    }

    // rules 5 and 7
    if (type.isMatrix()) {
        // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
        TType derefType(type, 0, rowMajor);

        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // use intra-matrix stride for stride of just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = baseAlignmentVec4Std140;
    return baseAlignmentVec4Std140;
}

// To aid the basic HLSL rule that a (non-array) vector must not cross a
// vec4 (16-byte) boundary.
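// For example (illustrative): a vec4 at offset 8 occupies bytes 8..23, crossing
// the 16-byte boundary, so it straddles improperly; at offset 16 it does not.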
bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
{
    if (! type.isVector() || type.isArray())
        return false;

    return size <= 16 ? offset / 16 != (offset + size - 1) / 16
                      : offset % 16 != 0;
}

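// Implement the size and alignment rules for scalar block layout (as used by,
// e.g., GL_EXT_scalar_block_layout): alignment is always the component's scalar
// alignment, with none of the vec4 rounding of std140. For example
// (illustrative), a vec3 here has alignment 4 and size 12, versus alignment 16
// under std140.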
int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
{
    int alignment;

    stride = 0;
    int dummyStride;

    if (type.isArray()) {
        TType derefType(type, 0);
        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;
        RoundToPow2(stride, alignment);

        size = stride * (type.getOuterArraySize() - 1) + size;
        return alignment;
    }

    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        int maxAlignment = 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
                                                     (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        return maxAlignment;
    }

    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);

        size *= type.getVectorSize();
        return scalarAlign;
    }

    if (type.isMatrix()) {
        TType derefType(type, 0, rowMajor);

        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;  // use intra-matrix stride for stride of just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = 1;
    return 1;
}
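// Dispatch to the alignment rules selected by the packing qualifier:
// scalar layout versus the std140/std430 base alignments.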
int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    if (layoutPacking == glslang::ElpScalar) {
        return getScalarAlignment(type, size, stride, rowMajor);
    } else {
        return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
    }
}

// shared calculation by getOffset and getOffsets
void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
{
    int dummyStride;

    // modify just the children's view of matrix layout, if there is one for this member
    TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
    int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
                                             parentType.getQualifier().layoutPacking,
                                             subMatrixLayout != ElmNone
                                                 ? subMatrixLayout == ElmRowMajor
                                                 : parentType.getQualifier().layoutMatrix == ElmRowMajor);
    RoundToPow2(offset, memberAlignment);
}

// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
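// For example (illustrative): for 'layout(std140) uniform B { float a; vec3 b; };',
// getOffset for member 1 yields 16, since 'a' occupies bytes 0..3 and 'b' must
// be aligned to 16.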
int TIntermediate::getOffset(const TType& type, int index)
{
    const TTypeList& memberList = *type.getStruct();

    // Don't calculate offset if one is present, it could be user supplied
    // and different than what would be calculated.  That is, this is faster,
    // but not just an optimization.
    if (memberList[index].type->getQualifier().hasOffset())
        return memberList[index].type->getQualifier().layoutOffset;

    int memberSize = 0;
    int offset = 0;
    for (int m = 0; m <= index; ++m) {
        updateOffset(type, *memberList[m].type, offset, memberSize);

        if (m < index)
            offset += memberSize;
    }

    return offset;
}

// Calculate the block data size.
// Block arrayness is not taken into account; each element is backed by a separate buffer.
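// For example (illustrative): for the block 'B' above, the last member 'b' has
// offset 16 and size 12, so getBlockSize returns 28; no trailing padding is
// added here.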
int TIntermediate::getBlockSize(const TType& blockType)
{
    const TTypeList& memberList = *blockType.getStruct();
    int lastIndex = (int)memberList.size() - 1;
    int lastOffset = getOffset(blockType, lastIndex);

    int lastMemberSize;
    int dummyStride;
    getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
                       blockType.getQualifier().layoutPacking,
                       blockType.getQualifier().layoutMatrix == ElmRowMajor);

    return lastOffset + lastMemberSize;
}

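// Compute the size of the block referenced by a buffer-reference type
// (presumably GL_EXT_buffer_reference), rounding the size up to the declared
// alignment; '(size + align - 1) & ~(align - 1)' rounds up to a multiple of a
// power-of-two 'align'.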
int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
{
    assert(type.getBasicType() == EbtReference);
    int size = getBlockSize(*type.getReferentType());

    int align = type.getBufferReferenceAlignment();

    if (align) {
        size = (size + align - 1) & ~(align - 1);
    }

    return size;
}

} // end namespace glslang