1 //
2 // Copyright (C) 2013 LunarG, Inc.
3 // Copyright (C) 2017 ARM Limited.
4 // Copyright (C) 2015-2018 Google, Inc.
5 //
6 // All rights reserved.
7 //
8 // Redistribution and use in source and binary forms, with or without
9 // modification, are permitted provided that the following conditions
10 // are met:
11 //
12 //    Redistributions of source code must retain the above copyright
13 //    notice, this list of conditions and the following disclaimer.
14 //
15 //    Redistributions in binary form must reproduce the above
16 //    copyright notice, this list of conditions and the following
17 //    disclaimer in the documentation and/or other materials provided
18 //    with the distribution.
19 //
20 //    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
21 //    contributors may be used to endorse or promote products derived
22 //    from this software without specific prior written permission.
23 //
24 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 // POSSIBILITY OF SUCH DAMAGE.
36 //
37 
38 //
39 // Do link-time merging and validation of intermediate representations.
40 //
41 // Basic model is that during compilation, each compilation unit (shader) is
42 // compiled into one TIntermediate instance.  Then, at link time, multiple
43 // units for the same stage can be merged together, which can generate errors.
44 // Then, after all merging, a single instance of TIntermediate represents
45 // the whole stage.  A final error check can be done on the resulting stage,
46 // even if no merging was done (i.e., the stage was only one compilation unit).
47 //
48 
49 #include "localintermediate.h"
50 #include "../Include/InfoSink.h"
51 
52 namespace glslang {
53 
54 //
55 // Link-time error emitter.
56 //
error(TInfoSink & infoSink,const char * message)57 void TIntermediate::error(TInfoSink& infoSink, const char* message)
58 {
59 #ifndef GLSLANG_WEB
60     infoSink.info.prefix(EPrefixError);
61     infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
62 #endif
63 
64     ++numErrors;
65 }
66 
67 // Link-time warning.
warn(TInfoSink & infoSink,const char * message)68 void TIntermediate::warn(TInfoSink& infoSink, const char* message)
69 {
70 #ifndef GLSLANG_WEB
71     infoSink.info.prefix(EPrefixWarning);
72     infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
73 #endif
74 }
75 
76 // TODO: 4.4 offset/align:  "Two blocks linked together in the same program with the same block
77 // name must have the exact same set of members qualified with offset and their integral-constant
78 // expression values must be the same, or a link-time error results."
79 
80 //
81 // Merge the information from 'unit' into 'this'
82 //
// Merge 'unit' into 'this' in three phases: entry points and call graphs,
// then per-stage modes/layout state, then the ASTs themselves.
// Compiled out entirely for GLSLANG_WEB / GLSLANG_ANGLE builds, which do not
// support multi-unit linking.
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
    mergeCallGraphs(infoSink, unit);
    mergeModes(infoSink, unit);
    mergeTrees(infoSink, unit);
#endif
}
91 
mergeCallGraphs(TInfoSink & infoSink,TIntermediate & unit)92 void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
93 {
94     if (unit.getNumEntryPoints() > 0) {
95         if (getNumEntryPoints() > 0)
96             error(infoSink, "can't handle multiple entry points per stage");
97         else {
98             entryPointName = unit.getEntryPointName();
99             entryPointMangledName = unit.getEntryPointMangledName();
100         }
101     }
102     numEntryPoints += unit.getNumEntryPoints();
103 
104     callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
105 }
106 
107 #if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
108 
109 #define MERGE_MAX(member) member = std::max(member, unit.member)
110 #define MERGE_TRUE(member) if (unit.member) member = unit.member;
111 
mergeModes(TInfoSink & infoSink,TIntermediate & unit)112 void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
113 {
114     if (language != unit.language)
115         error(infoSink, "stages must match when linking into a single stage");
116 
117     if (getSource() == EShSourceNone)
118         setSource(unit.getSource());
119     if (getSource() != unit.getSource())
120         error(infoSink, "can't link compilation units from different source languages");
121 
122     if (treeRoot == nullptr) {
123         profile = unit.profile;
124         version = unit.version;
125         requestedExtensions = unit.requestedExtensions;
126     } else {
127         if ((isEsProfile()) != (unit.isEsProfile()))
128             error(infoSink, "Cannot cross link ES and desktop profiles");
129         else if (unit.profile == ECompatibilityProfile)
130             profile = ECompatibilityProfile;
131         version = std::max(version, unit.version);
132         requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
133     }
134 
135     MERGE_MAX(spvVersion.spv);
136     MERGE_MAX(spvVersion.vulkanGlsl);
137     MERGE_MAX(spvVersion.vulkan);
138     MERGE_MAX(spvVersion.openGl);
139 
140     numErrors += unit.getNumErrors();
141     // Only one push_constant is allowed, mergeLinkerObjects() will ensure the push_constant
142     // is the same for all units.
143     if (numPushConstants > 1 || unit.numPushConstants > 1)
144         error(infoSink, "Only one push_constant block is allowed per stage");
145     numPushConstants = std::min(numPushConstants + unit.numPushConstants, 1);
146 
147     if (unit.invocations != TQualifier::layoutNotSet) {
148         if (invocations == TQualifier::layoutNotSet)
149             invocations = unit.invocations;
150         else if (invocations != unit.invocations)
151             error(infoSink, "number of invocations must match between compilation units");
152     }
153 
154     if (vertices == TQualifier::layoutNotSet)
155         vertices = unit.vertices;
156     else if (unit.vertices != TQualifier::layoutNotSet && vertices != unit.vertices) {
157         if (language == EShLangGeometry || language == EShLangMeshNV)
158             error(infoSink, "Contradictory layout max_vertices values");
159         else if (language == EShLangTessControl)
160             error(infoSink, "Contradictory layout vertices values");
161         else
162             assert(0);
163     }
164     if (primitives == TQualifier::layoutNotSet)
165         primitives = unit.primitives;
166     else if (primitives != unit.primitives) {
167         if (language == EShLangMeshNV)
168             error(infoSink, "Contradictory layout max_primitives values");
169         else
170             assert(0);
171     }
172 
173     if (inputPrimitive == ElgNone)
174         inputPrimitive = unit.inputPrimitive;
175     else if (unit.inputPrimitive != ElgNone && inputPrimitive != unit.inputPrimitive)
176         error(infoSink, "Contradictory input layout primitives");
177 
178     if (outputPrimitive == ElgNone)
179         outputPrimitive = unit.outputPrimitive;
180     else if (unit.outputPrimitive != ElgNone && outputPrimitive != unit.outputPrimitive)
181         error(infoSink, "Contradictory output layout primitives");
182 
183     if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
184         error(infoSink, "gl_FragCoord redeclarations must match across shaders");
185 
186     if (vertexSpacing == EvsNone)
187         vertexSpacing = unit.vertexSpacing;
188     else if (vertexSpacing != unit.vertexSpacing)
189         error(infoSink, "Contradictory input vertex spacing");
190 
191     if (vertexOrder == EvoNone)
192         vertexOrder = unit.vertexOrder;
193     else if (vertexOrder != unit.vertexOrder)
194         error(infoSink, "Contradictory triangle ordering");
195 
196     MERGE_TRUE(pointMode);
197 
198     for (int i = 0; i < 3; ++i) {
199         if (unit.localSizeNotDefault[i]) {
200             if (!localSizeNotDefault[i]) {
201                 localSize[i] = unit.localSize[i];
202                 localSizeNotDefault[i] = true;
203             }
204             else if (localSize[i] != unit.localSize[i])
205                 error(infoSink, "Contradictory local size");
206         }
207 
208         if (localSizeSpecId[i] == TQualifier::layoutNotSet)
209             localSizeSpecId[i] = unit.localSizeSpecId[i];
210         else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
211             error(infoSink, "Contradictory local size specialization ids");
212     }
213 
214     MERGE_TRUE(earlyFragmentTests);
215     MERGE_TRUE(postDepthCoverage);
216 
217     if (depthLayout == EldNone)
218         depthLayout = unit.depthLayout;
219     else if (depthLayout != unit.depthLayout)
220         error(infoSink, "Contradictory depth layouts");
221 
222     MERGE_TRUE(depthReplacing);
223     MERGE_TRUE(hlslFunctionality1);
224 
225     blendEquations |= unit.blendEquations;
226 
227     MERGE_TRUE(xfbMode);
228 
229     for (size_t b = 0; b < xfbBuffers.size(); ++b) {
230         if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
231             xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
232         else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
233             error(infoSink, "Contradictory xfb_stride");
234         xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
235         if (unit.xfbBuffers[b].contains64BitType)
236             xfbBuffers[b].contains64BitType = true;
237         if (unit.xfbBuffers[b].contains32BitType)
238             xfbBuffers[b].contains32BitType = true;
239         if (unit.xfbBuffers[b].contains16BitType)
240             xfbBuffers[b].contains16BitType = true;
241         // TODO: 4.4 link: enhanced layouts: compare ranges
242     }
243 
244     MERGE_TRUE(multiStream);
245     MERGE_TRUE(layoutOverrideCoverage);
246     MERGE_TRUE(geoPassthroughEXT);
247 
248     for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
249         if (unit.shiftBinding[i] > 0)
250             setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
251     }
252 
253     for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
254         for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
255             setShiftBindingForSet((TResourceType)i, it->second, it->first);
256     }
257 
258     resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());
259 
260     MERGE_TRUE(autoMapBindings);
261     MERGE_TRUE(autoMapLocations);
262     MERGE_TRUE(invertY);
263     MERGE_TRUE(flattenUniformArrays);
264     MERGE_TRUE(useUnknownFormat);
265     MERGE_TRUE(hlslOffsets);
266     MERGE_TRUE(useStorageBuffer);
267     MERGE_TRUE(hlslIoMapping);
268 
269     // TODO: sourceFile
270     // TODO: sourceText
271     // TODO: processes
272 
273     MERGE_TRUE(needToLegalize);
274     MERGE_TRUE(binaryDoubleOutput);
275     MERGE_TRUE(usePhysicalStorageBuffer);
276 }
277 
278 //
279 // Merge the 'unit' AST into 'this' AST.
280 // That includes rationalizing the unique IDs, which were set up independently,
281 // and might have overlaps that are not the same symbol, or might have different
282 // IDs for what should be the same shared symbol.
283 //
mergeTrees(TInfoSink & infoSink,TIntermediate & unit)284 void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
285 {
286     if (unit.treeRoot == nullptr)
287         return;
288 
289     if (treeRoot == nullptr) {
290         treeRoot = unit.treeRoot;
291         return;
292     }
293 
294     // Getting this far means we have two existing trees to merge...
295     numShaderRecordBlocks += unit.numShaderRecordBlocks;
296     numTaskNVBlocks += unit.numTaskNVBlocks;
297 
298     // Get the top-level globals of each unit
299     TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
300     TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();
301 
302     // Get the linker-object lists
303     TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
304     const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();
305 
306     // Map by global name to unique ID to rationalize the same object having
307     // differing IDs in different trees.
308     TIdMaps idMaps;
309     int maxId;
310     seedIdMap(idMaps, maxId);
311     remapIds(idMaps, maxId + 1, unit);
312 
313     mergeBodies(infoSink, globals, unitGlobals);
314     mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects);
315     ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
316 }
317 
318 #endif
319 
getNameForIdMap(TIntermSymbol * symbol)320 static const TString& getNameForIdMap(TIntermSymbol* symbol)
321 {
322     TShaderInterface si = symbol->getType().getShaderInterface();
323     if (si == EsiNone)
324         return symbol->getName();
325     else
326         return symbol->getType().getTypeName();
327 }
328 
329 
330 
331 // Traverser that seeds an ID map with all built-ins, and tracks the
332 // maximum ID used.
333 // (It would be nice to put this in a function, but that causes warnings
334 // on having no bodies for the copy-constructor/operator=.)
335 class TBuiltInIdTraverser : public TIntermTraverser {
336 public:
TBuiltInIdTraverser(TIdMaps & idMaps)337     TBuiltInIdTraverser(TIdMaps& idMaps) : idMaps(idMaps), maxId(0) { }
338     // If it's a built in, add it to the map.
339     // Track the max ID.
visitSymbol(TIntermSymbol * symbol)340     virtual void visitSymbol(TIntermSymbol* symbol)
341     {
342         const TQualifier& qualifier = symbol->getType().getQualifier();
343         if (qualifier.builtIn != EbvNone) {
344             TShaderInterface si = symbol->getType().getShaderInterface();
345             idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
346         }
347         maxId = std::max(maxId, symbol->getId());
348     }
getMaxId() const349     int getMaxId() const { return maxId; }
350 protected:
351     TBuiltInIdTraverser(TBuiltInIdTraverser&);
352     TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
353     TIdMaps& idMaps;
354     int maxId;
355 };
356 
357 // Traverser that seeds an ID map with non-builtins.
358 // (It would be nice to put this in a function, but that causes warnings
359 // on having no bodies for the copy-constructor/operator=.)
360 class TUserIdTraverser : public TIntermTraverser {
361 public:
TUserIdTraverser(TIdMaps & idMaps)362     TUserIdTraverser(TIdMaps& idMaps) : idMaps(idMaps) { }
363     // If its a non-built-in global, add it to the map.
visitSymbol(TIntermSymbol * symbol)364     virtual void visitSymbol(TIntermSymbol* symbol)
365     {
366         const TQualifier& qualifier = symbol->getType().getQualifier();
367         if (qualifier.builtIn == EbvNone) {
368             TShaderInterface si = symbol->getType().getShaderInterface();
369             idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
370         }
371     }
372 
373 protected:
374     TUserIdTraverser(TUserIdTraverser&);
375     TUserIdTraverser& operator=(TUserIdTraverser&);
376     TIdMaps& idMaps; // over biggest id
377 };
378 
// Initialize the ID map with what we know of 'this' AST.
seedIdMap(TIdMaps & idMaps,int & maxId)380 void TIntermediate::seedIdMap(TIdMaps& idMaps, int& maxId)
381 {
382     // all built-ins everywhere need to align on IDs and contribute to the max ID
383     TBuiltInIdTraverser builtInIdTraverser(idMaps);
384     treeRoot->traverse(&builtInIdTraverser);
385     maxId = builtInIdTraverser.getMaxId();
386 
387     // user variables in the linker object list need to align on ids
388     TUserIdTraverser userIdTraverser(idMaps);
389     findLinkerObjects()->traverse(&userIdTraverser);
390 }
391 
392 // Traverser to map an AST ID to what was known from the seeding AST.
393 // (It would be nice to put this in a function, but that causes warnings
394 // on having no bodies for the copy-constructor/operator=.)
395 class TRemapIdTraverser : public TIntermTraverser {
396 public:
TRemapIdTraverser(const TIdMaps & idMaps,int idShift)397     TRemapIdTraverser(const TIdMaps& idMaps, int idShift) : idMaps(idMaps), idShift(idShift) { }
398     // Do the mapping:
399     //  - if the same symbol, adopt the 'this' ID
400     //  - otherwise, ensure a unique ID by shifting to a new space
visitSymbol(TIntermSymbol * symbol)401     virtual void visitSymbol(TIntermSymbol* symbol)
402     {
403         const TQualifier& qualifier = symbol->getType().getQualifier();
404         bool remapped = false;
405         if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
406             TShaderInterface si = symbol->getType().getShaderInterface();
407             auto it = idMaps[si].find(getNameForIdMap(symbol));
408             if (it != idMaps[si].end()) {
409                 symbol->changeId(it->second);
410                 remapped = true;
411             }
412         }
413         if (!remapped)
414             symbol->changeId(symbol->getId() + idShift);
415     }
416 protected:
417     TRemapIdTraverser(TRemapIdTraverser&);
418     TRemapIdTraverser& operator=(TRemapIdTraverser&);
419     const TIdMaps& idMaps;
420     int idShift;
421 };
422 
remapIds(const TIdMaps & idMaps,int idShift,TIntermediate & unit)423 void TIntermediate::remapIds(const TIdMaps& idMaps, int idShift, TIntermediate& unit)
424 {
425     // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
426     TRemapIdTraverser idTraverser(idMaps, idShift);
427     unit.getTreeRoot()->traverse(&idTraverser);
428 }
429 
430 //
431 // Merge the function bodies and global-level initializers from unitGlobals into globals.
432 // Will error check duplication of function bodies for the same signature.
433 //
mergeBodies(TInfoSink & infoSink,TIntermSequence & globals,const TIntermSequence & unitGlobals)434 void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
435 {
436     // TODO: link-time performance: Processing in alphabetical order will be faster
437 
438     // Error check the global objects, not including the linker objects
439     for (unsigned int child = 0; child < globals.size() - 1; ++child) {
440         for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
441             TIntermAggregate* body = globals[child]->getAsAggregate();
442             TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
443             if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
444                 error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
445                 infoSink.info << "    " << globals[child]->getAsAggregate()->getName() << "\n";
446             }
447         }
448     }
449 
450     // Merge the global objects, just in front of the linker objects
451     globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
452 }
453 
454 //
455 // Merge the linker objects from unitLinkerObjects into linkerObjects.
456 // Duplication is expected and filtered out, but contradictions are an error.
457 //
mergeLinkerObjects(TInfoSink & infoSink,TIntermSequence & linkerObjects,const TIntermSequence & unitLinkerObjects)458 void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects)
459 {
460     // Error check and merge the linker objects (duplicates should not be created)
461     std::size_t initialNumLinkerObjects = linkerObjects.size();
462     for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
463         bool merge = true;
464         for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
465             TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
466             TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
467             assert(symbol && unitSymbol);
468 
469             bool isSameSymbol = false;
470             // If they are both blocks in the same shader interface,
471             // match by the block-name, not the identifier name.
472             if (symbol->getType().getBasicType() == EbtBlock && unitSymbol->getType().getBasicType() == EbtBlock) {
473                 if (symbol->getType().getShaderInterface() == unitSymbol->getType().getShaderInterface()) {
474                     isSameSymbol = symbol->getType().getTypeName() == unitSymbol->getType().getTypeName();
475                 }
476             }
477             else if (symbol->getName() == unitSymbol->getName())
478                 isSameSymbol = true;
479 
480             if (isSameSymbol) {
481                 // filter out copy
482                 merge = false;
483 
484                 // but if one has an initializer and the other does not, update
485                 // the initializer
486                 if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
487                     symbol->setConstArray(unitSymbol->getConstArray());
488 
489                 // Similarly for binding
490                 if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
491                     symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;
492 
493                 // Update implicit array sizes
494                 mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());
495 
496                 // Check for consistent types/qualification/initializers etc.
497                 mergeErrorCheck(infoSink, *symbol, *unitSymbol, false);
498             }
499             // If different symbols, verify they arn't push_constant since there can only be one per stage
500             else if (symbol->getQualifier().isPushConstant() && unitSymbol->getQualifier().isPushConstant())
501                 error(infoSink, "Only one push_constant block is allowed per stage");
502         }
503         if (merge)
504             linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);
505     }
506 }
507 
508 // TODO 4.5 link functionality: cull distance array size checking
509 
510 // Recursively merge the implicit array sizes through the objects' respective type trees.
mergeImplicitArraySizes(TType & type,const TType & unitType)511 void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
512 {
513     if (type.isUnsizedArray()) {
514         if (unitType.isUnsizedArray()) {
515             type.updateImplicitArraySize(unitType.getImplicitArraySize());
516             if (unitType.isArrayVariablyIndexed())
517                 type.setArrayVariablyIndexed();
518         } else if (unitType.isSizedArray())
519             type.changeOuterArraySize(unitType.getOuterArraySize());
520     }
521 
522     // Type mismatches are caught and reported after this, just be careful for now.
523     if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
524         return;
525 
526     for (int i = 0; i < (int)type.getStruct()->size(); ++i)
527         mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
528 }
529 
530 //
531 // Compare two global objects from two compilation units and see if they match
532 // well enough.  Rules can be different for intra- vs. cross-stage matching.
533 //
534 // This function only does one of intra- or cross-stage matching per call.
535 //
//
// Compare two global objects from two compilation units and see if they match
// well enough.  Rules can be different for intra- vs. cross-stage matching.
//
// This function only does one of intra- or cross-stage matching per call.
//
// Emits link errors/warnings through 'infoSink'; when any type-related check
// fails, a final type comparison of both symbols is appended to the output.
// Compiled out for GLSLANG_WEB / GLSLANG_ANGLE builds.
void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
{
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
    // Set when a failed check should be followed by printing both types.
    bool writeTypeComparison = false;

    // Types have to match
    if (symbol.getType() != unitSymbol.getType()) {
        // but, we make an exception if one is an implicit array and the other is sized
        if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
                symbol.getType().sameElementType(unitSymbol.getType()) &&
                (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) {
            error(infoSink, "Types must match:");
            writeTypeComparison = true;
        }
    }

    // Qualifiers have to (almost) match

    // Storage...
    if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
        error(infoSink, "Storage qualifiers must match:");
        writeTypeComparison = true;
    }

    // Uniform and buffer blocks must either both have an instance name, or
    // must both be anonymous. The names don't need to match though.
    if (symbol.getQualifier().isUniformOrBuffer() &&
        (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()))) {
        error(infoSink, "Matched Uniform or Storage blocks must all be anonymous,"
                        " or all be named:");
        writeTypeComparison = true;
    }

    // Same storage but differing instance names (or mixed anonymous/named)
    // is only a warning, not an error.
    if (symbol.getQualifier().storage == unitSymbol.getQualifier().storage &&
        (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()) ||
         (!IsAnonymous(symbol.getName()) && symbol.getName() != unitSymbol.getName()))) {
        warn(infoSink, "Matched shader interfaces are using different instance names.");
        writeTypeComparison = true;
    }

    // Precision...
    if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
        error(infoSink, "Precision qualifiers must match:");
        writeTypeComparison = true;
    }

    // Invariance... (only checked within a stage)
    if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
        error(infoSink, "Presence of invariant qualifier must match:");
        writeTypeComparison = true;
    }

    // Precise... (only checked within a stage)
    if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) {
        error(infoSink, "Presence of precise qualifier must match:");
        writeTypeComparison = true;
    }

    // Auxiliary and interpolation...
    if (symbol.getQualifier().centroid  != unitSymbol.getQualifier().centroid ||
        symbol.getQualifier().smooth    != unitSymbol.getQualifier().smooth ||
        symbol.getQualifier().flat      != unitSymbol.getQualifier().flat ||
        symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() ||
        symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() ||
        symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective()) {
        error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
        writeTypeComparison = true;
    }

    // Memory...
    if (symbol.getQualifier().coherent          != unitSymbol.getQualifier().coherent ||
        symbol.getQualifier().devicecoherent    != unitSymbol.getQualifier().devicecoherent ||
        symbol.getQualifier().queuefamilycoherent  != unitSymbol.getQualifier().queuefamilycoherent ||
        symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
        symbol.getQualifier().subgroupcoherent  != unitSymbol.getQualifier().subgroupcoherent ||
        symbol.getQualifier().shadercallcoherent!= unitSymbol.getQualifier().shadercallcoherent ||
        symbol.getQualifier().nonprivate        != unitSymbol.getQualifier().nonprivate ||
        symbol.getQualifier().volatil           != unitSymbol.getQualifier().volatil ||
        symbol.getQualifier().restrict          != unitSymbol.getQualifier().restrict ||
        symbol.getQualifier().readonly          != unitSymbol.getQualifier().readonly ||
        symbol.getQualifier().writeonly         != unitSymbol.getQualifier().writeonly) {
        error(infoSink, "Memory qualifiers must match:");
        writeTypeComparison = true;
    }

    // Layouts...
    // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
    //       requires separate user-supplied offset from actual computed offset, but
    //       current implementation only has one offset.
    if (symbol.getQualifier().layoutMatrix    != unitSymbol.getQualifier().layoutMatrix ||
        symbol.getQualifier().layoutPacking   != unitSymbol.getQualifier().layoutPacking ||
        symbol.getQualifier().layoutLocation  != unitSymbol.getQualifier().layoutLocation ||
        symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent ||
        symbol.getQualifier().layoutIndex     != unitSymbol.getQualifier().layoutIndex ||
        symbol.getQualifier().layoutBinding   != unitSymbol.getQualifier().layoutBinding ||
        (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) {
        error(infoSink, "Layout qualification must match:");
        writeTypeComparison = true;
    }

    // Initializers have to match, if both are present, and if we don't already know the types don't match
    if (! writeTypeComparison) {
        if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
            if (symbol.getConstArray() != unitSymbol.getConstArray()) {
                error(infoSink, "Initializers must match:");
                infoSink.info << "    " << symbol.getName() << "\n";
            }
        }
    }

    // Append the side-by-side type comparison for any failed check above.
    if (writeTypeComparison) {
        infoSink.info << "    " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus ";
        if (symbol.getName() != unitSymbol.getName())
            infoSink.info << unitSymbol.getName() << ": ";

        infoSink.info << "\"" << unitSymbol.getType().getCompleteString() << "\"\n";
    }
#endif
}
655 
sharedBlockCheck(TInfoSink & infoSink)656 void TIntermediate::sharedBlockCheck(TInfoSink& infoSink)
657 {
658     bool has_shared_block = false;
659     bool has_shared_non_block = false;
660     TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
661     for (size_t i = 0; i < linkObjects.size(); ++i) {
662         const TType& type = linkObjects[i]->getAsTyped()->getType();
663         const TQualifier& qualifier = type.getQualifier();
664         if (qualifier.storage == glslang::EvqShared) {
665             if (type.getBasicType() == glslang::EbtBlock)
666                 has_shared_block = true;
667             else
668                 has_shared_non_block = true;
669         }
670     }
671     if (has_shared_block && has_shared_non_block)
672         error(infoSink, "cannot mix use of shared variables inside and outside blocks");
673 }
674 
675 //
676 // Do final link-time error checking of a complete (merged) intermediate representation.
677 // (Much error checking was done during merging).
678 //
679 // Also, lock in defaults of things not set, including array sizes.
680 //
void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
{
    // An empty tree (no code in this stage) has nothing to validate.
    if (getTreeRoot() == nullptr)
        return;

    // Every stage needs an entry point; GLSL makes its absence a hard error,
    // other source languages only warn here.
    if (numEntryPoints < 1) {
        if (getSource() == EShSourceGlsl)
            error(infoSink, "Missing entry point: Each stage requires one entry point");
        else
            warn(infoSink, "Entry point not found");
    }

    // recursion and missing body checking
    checkCallGraphCycles(infoSink);
    checkCallGraphBodies(infoSink, keepUncalled);

    // overlap/alias/missing I/O, etc.
    inOutLocationCheck(infoSink);

#ifndef GLSLANG_WEB
    if (getNumPushConstants() > 1)
        error(infoSink, "Only one push_constant block is allowed per stage");

    // invocations: lock in the default of 1 if no shader set it
    if (invocations == TQualifier::layoutNotSet)
        invocations = 1;

    // gl_ClipVertex is mutually exclusive with the clip/cull distance arrays.
    if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
    if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)");

    // Legacy fragment outputs cannot coexist with user-declared outputs,
    // nor with each other.
    if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
        error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
    if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
        error(infoSink, "Cannot use both gl_FragColor and gl_FragData");

    // Validate transform-feedback strides, buffer by buffer.
    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        // Align the implicit stride up to the widest component type captured
        // by this buffer (8 for 64-bit, 4 for 32-bit, 2 for 16-bit types).
        if (xfbBuffers[b].contains64BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 8);
        else if (xfbBuffers[b].contains32BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 4);
        else if (xfbBuffers[b].contains16BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 2);

        // "It is a compile-time or link-time error to have
        // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
        // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
        // compile-time or link-time error to have different values specified for the stride for the same buffer."
        if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
            error(infoSink, "xfb_stride is too small to hold all buffer entries:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
        }
        // No explicit stride declared: lock in the computed implicit stride.
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = xfbBuffers[b].implicitStride;

        // "If the buffer is capturing any
        // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
        // multiple of 4, or a compile-time or link-time error results."
        if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
            error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
            error(infoSink, "xfb_stride must be multiple of 4:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }
        // "If the buffer is capturing any
        // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
        else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
            error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }

        // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
        // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
        if (xfbBuffers[b].stride > (unsigned int)(4 * resources->maxTransformFeedbackInterleavedComponents)) {
            error(infoSink, "xfb_stride is too large:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources->maxTransformFeedbackInterleavedComponents << "\n";
        }
    }

    // Stage-specific link-time rules, plus locking-in of stage defaults
    // (tess spacing/order) that no shader in the stage set explicitly.
    switch (language) {
    case EShLangVertex:
        break;
    case EShLangTessControl:
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify an output layout(vertices=...)");
        break;
    case EShLangTessEvaluation:
        if (getSource() == EShSourceGlsl) {
            if (inputPrimitive == ElgNone)
                error(infoSink, "At least one shader must specify an input layout primitive");
            // Defaults per the GLSL spec when left unspecified.
            if (vertexSpacing == EvsNone)
                vertexSpacing = EvsEqual;
            if (vertexOrder == EvoNone)
                vertexOrder = EvoCcw;
        }
        break;
    case EShLangGeometry:
        if (inputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an input layout primitive");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        break;
    case EShLangFragment:
        // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
        // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
        // requiring explicit early_fragment_tests
        if (getPostDepthCoverage() && !getEarlyFragmentTests())
            error(infoSink, "post_depth_coverage requires early_fragment_tests");
        break;
    case EShLangCompute:
        sharedBlockCheck(infoSink);
        break;
    case EShLangRayGen:
    case EShLangIntersect:
    case EShLangAnyHit:
    case EShLangClosestHit:
    case EShLangMiss:
    case EShLangCallable:
        if (numShaderRecordBlocks > 1)
            error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
        break;
    case EShLangMeshNV:
        // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
        if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
            error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
        if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
        if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
        if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
            error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
        if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
            error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        if (primitives == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
        // fall through
    case EShLangTaskNV:
        if (numTaskNVBlocks > 1)
            error(infoSink, "Only one taskNV interface block is allowed per shader");
        sharedBlockCheck(infoSink);
        break;
    default:
        error(infoSink, "Unknown Stage.");
        break;
    }

    // Process the tree for any node-specific work.
    class TFinalLinkTraverser : public TIntermTraverser {
    public:
        TFinalLinkTraverser() { }
        virtual ~TFinalLinkTraverser() { }

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            // Implicitly size arrays.
            // If an unsized array is left as unsized, it effectively
            // becomes run-time sized.
            symbol->getWritableType().adoptImplicitArraySizes(false);
        }
    } finalLinkTraverser;

    treeRoot->traverse(&finalLinkTraverser);
#endif
}
858 
859 //
860 // See if the call graph contains any static recursion, which is disallowed
861 // by the specification.
862 //
// Detect static recursion in the merged call graph via depth-first search:
// a back edge to a node still on the current DFS path means a cycle.
// Sets the 'recursive' member and reports each offending edge once.
void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->currentPath = false;
        call->errorGiven = false;
    }

    //
    // Loop, looking for a new connected subgraph.  One subgraph is handled per loop iteration.
    //

    TCall* newRoot;
    do {
        // See if we have unvisited parts of the graph.
        newRoot = 0;
        for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
            if (! call->visited) {
                newRoot = &(*call);
                break;
            }
        }

        // If not, we are done.
        if (! newRoot)
            break;

        // Otherwise, we found a new subgraph, process it:
        // See what all can be reached by this new root, and if any of
        // that is recursive.  This is done by depth-first traversals, seeing
        // if a new call is found that was already in the currentPath (a back edge),
        // thereby detecting recursion.
        std::list<TCall*> stack;
        newRoot->currentPath = true; // currentPath will be true iff it is on the stack
        stack.push_back(newRoot);
        while (! stack.empty()) {
            // get a caller
            TCall* call = stack.back();

            // Add to the stack just one callee.
            // This algorithm always terminates, because only !visited and !currentPath causes a push
            // and all pushes change currentPath to true, and all pops change visited to true.
            TGraph::iterator child = callGraph.begin();
            for (; child != callGraph.end(); ++child) {

                // If we already visited this node, its whole subgraph has already been processed, so skip it.
                if (child->visited)
                    continue;

                // An edge exists when the top-of-stack callee is this node's caller.
                if (call->callee == child->caller) {
                    if (child->currentPath) {
                        // Then, we found a back edge
                        if (! child->errorGiven) {
                            error(infoSink, "Recursion detected:");
                            infoSink.info << "    " << call->callee << " calling " << child->callee << "\n";
                            child->errorGiven = true;
                            recursive = true;
                        }
                    } else {
                        // Descend into this callee; resume scanning it on a later
                        // iteration of the outer while-loop.
                        child->currentPath = true;
                        stack.push_back(&(*child));
                        break;
                    }
                }
            }
            if (child == callGraph.end()) {
                // no more callees, we bottomed out, never look at this node again
                stack.back()->currentPath = false;
                stack.back()->visited = true;
                stack.pop_back();
            }
        }  // end while, meaning nothing left to process in this subtree

    } while (newRoot);  // redundant loop check; should always exit via the 'break' above
}
939 
940 //
941 // See which functions are reachable from the entry point and which have bodies.
942 // Reachable ones with missing bodies are errors.
943 // Unreachable bodies are dead code.
944 //
// Determine, for the merged call graph, which functions are reachable from the
// entry point and which have bodies in the AST. A reachable call with no body
// is an error; unreachable bodies are dead code and (unless 'keepUncalled')
// are removed from the top-level AST sequence.
void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->calleeBodyPosition = -1;
    }

    // The top level of the AST includes function definitions (bodies).
    // Compare these to function calls in the call graph.
    // We'll end up knowing which have bodies, and if so,
    // how to map the call-graph node to the location in the AST.
    TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
    std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
    for (int f = 0; f < (int)functionSequence.size(); ++f) {
        glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
        if (node && (node->getOp() == glslang::EOpFunction)) {
            // The entry point itself stays reachable unconditionally.
            if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
                reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
            for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
                if (call->callee == node->getName())
                    call->calleeBodyPosition = f;
            }
        }
    }

    // Start call-graph traversal by visiting the entry point nodes.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
            call->visited = true;
    }

    // Propagate 'visited' through the call-graph to every part of the graph it
    // can reach (seeded with the entry-point setting above).
    // Fixed-point iteration: repeat until a full pass adds nothing new.
    bool changed;
    do {
        changed = false;
        for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
            if (call1->visited) {
                for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
                    if (! call2->visited) {
                        if (call1->callee == call2->caller) {
                            changed = true;
                            call2->visited = true;
                        }
                    }
                }
            }
        }
    } while (changed);

    // Any call-graph node set to visited but without a callee body is an error.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        if (call->visited) {
            if (call->calleeBodyPosition == -1) {
                error(infoSink, "No function definition (body) found: ");
                infoSink.info << "    " << call->callee << "\n";
            } else
                reachable[call->calleeBodyPosition] = true;
        }
    }

    // Bodies in the AST not reached by the call graph are dead;
    // clear them out, since they can't be reached and also can't
    // be translated further due to possibility of being ill defined.
    if (! keepUncalled) {
        // Null out dead entries, then compact with erase-remove.
        for (int f = 0; f < (int)functionSequence.size(); ++f) {
            if (! reachable[f])
                functionSequence[f] = nullptr;
        }
        functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
    }
}
1018 
1019 //
1020 // Satisfy rules for location qualifiers on inputs and outputs
1021 //
inOutLocationCheck(TInfoSink & infoSink)1022 void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
1023 {
1024     // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
1025     bool fragOutWithNoLocation = false;
1026     int numFragOut = 0;
1027 
1028     // TODO: linker functionality: location collision checking
1029 
1030     TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
1031     for (size_t i = 0; i < linkObjects.size(); ++i) {
1032         const TType& type = linkObjects[i]->getAsTyped()->getType();
1033         const TQualifier& qualifier = type.getQualifier();
1034         if (language == EShLangFragment) {
1035             if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
1036                 ++numFragOut;
1037                 if (!qualifier.hasAnyLocation())
1038                     fragOutWithNoLocation = true;
1039             }
1040         }
1041     }
1042 
1043     if (isEsProfile()) {
1044         if (numFragOut > 1 && fragOutWithNoLocation)
1045             error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
1046     }
1047 }
1048 
findLinkerObjects() const1049 TIntermAggregate* TIntermediate::findLinkerObjects() const
1050 {
1051     // Get the top-level globals
1052     TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
1053 
1054     // Get the last member of the sequences, expected to be the linker-object lists
1055     assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
1056 
1057     return globals.back()->getAsAggregate();
1058 }
1059 
1060 // See if a variable was both a user-declared output and used.
1061 // Note: the spec discusses writing to one, but this looks at read or write, which
1062 // is more useful, and perhaps the spec should be changed to reflect that.
userOutputUsed() const1063 bool TIntermediate::userOutputUsed() const
1064 {
1065     const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
1066 
1067     bool found = false;
1068     for (size_t i = 0; i < linkerObjects.size(); ++i) {
1069         const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
1070         if (symbolNode.getQualifier().storage == EvqVaryingOut &&
1071             symbolNode.getName().compare(0, 3, "gl_") != 0 &&
1072             inIoAccessed(symbolNode.getName())) {
1073             found = true;
1074             break;
1075         }
1076     }
1077 
1078     return found;
1079 }
1080 
1081 // Accumulate locations used for inputs, outputs, and uniforms, payload and callable data
1082 // and check for collisions as the accumulation is done.
1083 //
1084 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1085 //
1086 // typeCollision is set to true if there is no direct collision, but the types in the same location
1087 // are different.
1088 //
addUsedLocation(const TQualifier & qualifier,const TType & type,bool & typeCollision)1089 int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
1090 {
1091     typeCollision = false;
1092 
1093     int set;
1094     int setRT;
1095     if (qualifier.isPipeInput())
1096         set = 0;
1097     else if (qualifier.isPipeOutput())
1098         set = 1;
1099     else if (qualifier.storage == EvqUniform)
1100         set = 2;
1101     else if (qualifier.storage == EvqBuffer)
1102         set = 3;
1103     else if (qualifier.isAnyPayload())
1104         setRT = 0;
1105     else if (qualifier.isAnyCallable())
1106         setRT = 1;
1107     else
1108         return -1;
1109 
1110     int size;
1111     if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
1112         size = 1;
1113     } else if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
1114         if (type.isSizedArray())
1115             size = type.getCumulativeArraySize();
1116         else
1117             size = 1;
1118     } else {
1119         // Strip off the outer array dimension for those having an extra one.
1120         if (type.isArray() && qualifier.isArrayedIo(language)) {
1121             TType elementType(type, 0);
1122             size = computeTypeLocationSize(elementType, language);
1123         } else
1124             size = computeTypeLocationSize(type, language);
1125     }
1126 
1127     // Locations, and components within locations.
1128     //
1129     // Almost always, dealing with components means a single location is involved.
1130     // The exception is a dvec3. From the spec:
1131     //
1132     // "A dvec3 will consume all four components of the first location and components 0 and 1 of
1133     // the second location. This leaves components 2 and 3 available for other component-qualified
1134     // declarations."
1135     //
1136     // That means, without ever mentioning a component, a component range
1137     // for a different location gets specified, if it's not a vertex shader input. (!)
1138     // (A vertex shader input will show using only one location, even for a dvec3/4.)
1139     //
1140     // So, for the case of dvec3, we need two independent ioRanges.
1141     //
1142     // For raytracing IO (payloads and callabledata) each declaration occupies a single
1143     // slot irrespective of type.
1144     int collision = -1; // no collision
1145 #ifndef GLSLANG_WEB
1146     if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
1147         TRange range(qualifier.layoutLocation, qualifier.layoutLocation);
1148         collision = checkLocationRT(setRT, qualifier.layoutLocation);
1149         if (collision < 0)
1150             usedIoRT[setRT].push_back(range);
1151     } else if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
1152         (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
1153         // Dealing with dvec3 in/out split across two locations.
1154         // Need two io-ranges.
1155         // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.
1156 
1157         // First range:
1158         TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
1159         TRange componentRange(0, 3);
1160         TIoRange range(locationRange, componentRange, type.getBasicType(), 0);
1161 
1162         // check for collisions
1163         collision = checkLocationRange(set, range, type, typeCollision);
1164         if (collision < 0) {
1165             usedIo[set].push_back(range);
1166 
1167             // Second range:
1168             TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
1169             TRange componentRange2(0, 1);
1170             TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0);
1171 
1172             // check for collisions
1173             collision = checkLocationRange(set, range2, type, typeCollision);
1174             if (collision < 0)
1175                 usedIo[set].push_back(range2);
1176         }
1177     } else
1178 #endif
1179     {
1180         // Not a dvec3 in/out split across two locations, generic path.
1181         // Need a single IO-range block.
1182 
1183         TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
1184         TRange componentRange(0, 3);
1185         if (qualifier.hasComponent() || type.getVectorSize() > 0) {
1186             int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
1187             if (qualifier.hasComponent())
1188                 componentRange.start = qualifier.layoutComponent;
1189             componentRange.last  = componentRange.start + consumedComponents - 1;
1190         }
1191 
1192         // combine location and component ranges
1193         TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.getIndex() : 0);
1194 
1195         // check for collisions, except for vertex inputs on desktop targeting OpenGL
1196         if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
1197             collision = checkLocationRange(set, range, type, typeCollision);
1198 
1199         if (collision < 0)
1200             usedIo[set].push_back(range);
1201     }
1202 
1203     return collision;
1204 }
1205 
1206 // Compare a new (the passed in) 'range' against the existing set, and see
1207 // if there are any collisions.
1208 //
1209 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1210 //
checkLocationRange(int set,const TIoRange & range,const TType & type,bool & typeCollision)1211 int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
1212 {
1213     for (size_t r = 0; r < usedIo[set].size(); ++r) {
1214         if (range.overlap(usedIo[set][r])) {
1215             // there is a collision; pick one
1216             return std::max(range.location.start, usedIo[set][r].location.start);
1217         } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
1218             // aliased-type mismatch
1219             typeCollision = true;
1220             return std::max(range.location.start, usedIo[set][r].location.start);
1221         }
1222     }
1223 
1224     return -1; // no collision
1225 }
1226 
checkLocationRT(int set,int location)1227 int TIntermediate::checkLocationRT(int set, int location) {
1228     TRange range(location, location);
1229     for (size_t r = 0; r < usedIoRT[set].size(); ++r) {
1230         if (range.overlap(usedIoRT[set][r])) {
1231             return range.start;
1232         }
1233     }
1234     return -1; // no collision
1235 }
1236 
1237 // Accumulate bindings and offsets, and check for collisions
1238 // as the accumulation is done.
1239 //
1240 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1241 //
addUsedOffsets(int binding,int offset,int numOffsets)1242 int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
1243 {
1244     TRange bindingRange(binding, binding);
1245     TRange offsetRange(offset, offset + numOffsets - 1);
1246     TOffsetRange range(bindingRange, offsetRange);
1247 
1248     // check for collisions, except for vertex inputs on desktop
1249     for (size_t r = 0; r < usedAtomics.size(); ++r) {
1250         if (range.overlap(usedAtomics[r])) {
1251             // there is a collision; pick one
1252             return std::max(offset, usedAtomics[r].offset.start);
1253         }
1254     }
1255 
1256     usedAtomics.push_back(range);
1257 
1258     return -1; // no collision
1259 }
1260 
1261 // Accumulate used constant_id values.
1262 //
1263 // Return false is one was already used.
addUsedConstantId(int id)1264 bool TIntermediate::addUsedConstantId(int id)
1265 {
1266     if (usedConstantId.find(id) != usedConstantId.end())
1267         return false;
1268 
1269     usedConstantId.insert(id);
1270 
1271     return true;
1272 }
1273 
1274 // Recursively figure out how many locations are used up by an input or output type.
1275 // Return the size of type, as measured by "locations".
computeTypeLocationSize(const TType & type,EShLanguage stage)1276 int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
1277 {
1278     // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
1279     // consecutive locations..."
1280     if (type.isArray()) {
1281         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1282         // TODO: are there valid cases of having an unsized array with a location?  If so, running this code too early.
1283         TType elementType(type, 0);
1284         if (type.isSizedArray() && !type.getQualifier().isPerView())
1285             return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
1286         else {
1287 #ifndef GLSLANG_WEB
1288             // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
1289             elementType.getQualifier().perViewNV = false;
1290 #endif
1291             return computeTypeLocationSize(elementType, stage);
1292         }
1293     }
1294 
1295     // "The locations consumed by block and structure members are determined by applying the rules above
1296     // recursively..."
1297     if (type.isStruct()) {
1298         int size = 0;
1299         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1300             TType memberType(type, member);
1301             size += computeTypeLocationSize(memberType, stage);
1302         }
1303         return size;
1304     }
1305 
1306     // ES: "If a shader input is any scalar or vector type, it will consume a single location."
1307 
1308     // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
1309     // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
1310     // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
1311     // consume only a single location, in all stages."
1312     if (type.isScalar())
1313         return 1;
1314     if (type.isVector()) {
1315         if (stage == EShLangVertex && type.getQualifier().isPipeInput())
1316             return 1;
1317         if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
1318             return 2;
1319         else
1320             return 1;
1321     }
1322 
1323     // "If the declared input is an n x m single- or double-precision matrix, ...
1324     // The number of locations assigned for each matrix will be the same as
1325     // for an n-element array of m-component vectors..."
1326     if (type.isMatrix()) {
1327         TType columnType(type, 0);
1328         return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
1329     }
1330 
1331     assert(0);
1332     return 1;
1333 }
1334 
1335 // Same as computeTypeLocationSize but for uniforms
computeTypeUniformLocationSize(const TType & type)1336 int TIntermediate::computeTypeUniformLocationSize(const TType& type)
1337 {
1338     // "Individual elements of a uniform array are assigned
1339     // consecutive locations with the first element taking location
1340     // location."
1341     if (type.isArray()) {
1342         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1343         TType elementType(type, 0);
1344         if (type.isSizedArray()) {
1345             return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
1346         } else {
1347             // TODO: are there valid cases of having an implicitly-sized array with a location?  If so, running this code too early.
1348             return computeTypeUniformLocationSize(elementType);
1349         }
1350     }
1351 
1352     // "Each subsequent inner-most member or element gets incremental
1353     // locations for the entire structure or array."
1354     if (type.isStruct()) {
1355         int size = 0;
1356         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1357             TType memberType(type, member);
1358             size += computeTypeUniformLocationSize(memberType);
1359         }
1360         return size;
1361     }
1362 
1363     return 1;
1364 }
1365 
1366 #ifndef GLSLANG_WEB
1367 
1368 // Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
1369 //
1370 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1371 //
addXfbBufferOffset(const TType & type)1372 int TIntermediate::addXfbBufferOffset(const TType& type)
1373 {
1374     const TQualifier& qualifier = type.getQualifier();
1375 
1376     assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
1377     TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
1378 
1379     // compute the range
1380     unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
1381     buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
1382     TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
1383 
1384     // check for collisions
1385     for (size_t r = 0; r < buffer.ranges.size(); ++r) {
1386         if (range.overlap(buffer.ranges[r])) {
1387             // there is a collision; pick an example to return
1388             return std::max(range.start, buffer.ranges[r].start);
1389         }
1390     }
1391 
1392     buffer.ranges.push_back(range);
1393 
1394     return -1;  // no collision
1395 }
1396 
1397 // Recursively figure out how many bytes of xfb buffer are used by the given type.
1398 // Return the size of type, in bytes.
1399 // Sets contains64BitType to true if the type contains a 64-bit data type.
1400 // Sets contains32BitType to true if the type contains a 32-bit data type.
1401 // Sets contains16BitType to true if the type contains a 16-bit data type.
1402 // N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
computeTypeXfbSize(const TType & type,bool & contains64BitType,bool & contains32BitType,bool & contains16BitType) const1403 unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
1404 {
1405     // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
1406     // and the space taken in the buffer will be a multiple of 8.
1407     // ...within the qualified entity, subsequent components are each
1408     // assigned, in order, to the next available offset aligned to a multiple of
1409     // that component's size.  Aggregate types are flattened down to the component
1410     // level to get this sequence of components."
1411 
1412     if (type.isSizedArray()) {
1413         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1414         // Unsized array use to xfb should be a compile error.
1415         TType elementType(type, 0);
1416         return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains16BitType, contains16BitType);
1417     }
1418 
1419     if (type.isStruct()) {
1420         unsigned int size = 0;
1421         bool structContains64BitType = false;
1422         bool structContains32BitType = false;
1423         bool structContains16BitType = false;
1424         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1425             TType memberType(type, member);
1426             // "... if applied to
1427             // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
1428             // and the space taken in the buffer will be a multiple of 8."
1429             bool memberContains64BitType = false;
1430             bool memberContains32BitType = false;
1431             bool memberContains16BitType = false;
1432             int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
1433             if (memberContains64BitType) {
1434                 structContains64BitType = true;
1435                 RoundToPow2(size, 8);
1436             } else if (memberContains32BitType) {
1437                 structContains32BitType = true;
1438                 RoundToPow2(size, 4);
1439             } else if (memberContains16BitType) {
1440                 structContains16BitType = true;
1441                 RoundToPow2(size, 2);
1442             }
1443             size += memberSize;
1444         }
1445 
1446         if (structContains64BitType) {
1447             contains64BitType = true;
1448             RoundToPow2(size, 8);
1449         } else if (structContains32BitType) {
1450             contains32BitType = true;
1451             RoundToPow2(size, 4);
1452         } else if (structContains16BitType) {
1453             contains16BitType = true;
1454             RoundToPow2(size, 2);
1455         }
1456         return size;
1457     }
1458 
1459     int numComponents;
1460     if (type.isScalar())
1461         numComponents = 1;
1462     else if (type.isVector())
1463         numComponents = type.getVectorSize();
1464     else if (type.isMatrix())
1465         numComponents = type.getMatrixCols() * type.getMatrixRows();
1466     else {
1467         assert(0);
1468         numComponents = 1;
1469     }
1470 
1471     if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
1472         contains64BitType = true;
1473         return 8 * numComponents;
1474     } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
1475         contains16BitType = true;
1476         return 2 * numComponents;
1477     } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
1478         return numComponents;
1479     else {
1480         contains32BitType = true;
1481         return 4 * numComponents;
1482     }
1483 }
1484 
1485 #endif
1486 
1487 const int baseAlignmentVec4Std140 = 16;
1488 
1489 // Return the size and alignment of a component of the given type.
1490 // The size is returned in the 'size' parameter
1491 // Return value is the alignment..
getBaseAlignmentScalar(const TType & type,int & size)1492 int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
1493 {
1494 #ifdef GLSLANG_WEB
1495     size = 4; return 4;
1496 #endif
1497 
1498     switch (type.getBasicType()) {
1499     case EbtInt64:
1500     case EbtUint64:
1501     case EbtDouble:  size = 8; return 8;
1502     case EbtFloat16: size = 2; return 2;
1503     case EbtInt8:
1504     case EbtUint8:   size = 1; return 1;
1505     case EbtInt16:
1506     case EbtUint16:  size = 2; return 2;
1507     case EbtReference: size = 8; return 8;
1508     default:         size = 4; return 4;
1509     }
1510 }
1511 
// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
// Operates recursively.
//
// If std140 is true, it does the rounding up to vec4 size required by std140,
// otherwise it does not, yielding std430 rules.
//
// The size is returned in the 'size' parameter
//
// The stride is only non-0 for arrays or matrices, and is the stride of the
// top-level object nested within the type.  E.g., for an array of matrices,
// it is the distances needed between matrices, despite the rules saying the
// stride comes from the flattening down to vectors.
//
// Return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    int alignment;

    // Only std140 rounds array/matrix/struct alignment up to vec4; every other
    // packing handled here follows the std430 variant of the rules below.
    bool std140 = layoutPacking == glslang::ElpStd140;
    // When using the std140 storage layout, structures will be laid out in buffer
    // storage with its members stored in monotonically increasing order based on their
    // location in the declaration. A structure and each structure member have a base
    // offset and a base alignment, from which an aligned offset is computed by rounding
    // the base offset up to a multiple of the base alignment. The base offset of the first
    // member of a structure is taken from the aligned offset of the structure itself. The
    // base offset of all other structure members is derived by taking the offset of the
    // last basic machine unit consumed by the previous member and adding one. Each
    // structure member is stored in memory at its aligned offset. The members of a top-
    // level uniform block are laid out in buffer storage by treating the uniform block as
    // a structure with a base offset of zero.
    //
    //   1. If the member is a scalar consuming N basic machine units, the base alignment is N.
    //
    //   2. If the member is a two- or four-component vector with components consuming N basic
    //      machine units, the base alignment is 2N or 4N, respectively.
    //
    //   3. If the member is a three-component vector with components consuming N
    //      basic machine units, the base alignment is 4N.
    //
    //   4. If the member is an array of scalars or vectors, the base alignment and array
    //      stride are set to match the base alignment of a single array element, according
    //      to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
    //      array may have padding at the end; the base offset of the member following
    //      the array is rounded up to the next multiple of the base alignment.
    //
    //   5. If the member is a column-major matrix with C columns and R rows, the
    //      matrix is stored identically to an array of C column vectors with R
    //      components each, according to rule (4).
    //
    //   6. If the member is an array of S column-major matrices with C columns and
    //      R rows, the matrix is stored identically to a row of S X C column vectors
    //      with R components each, according to rule (4).
    //
    //   7. If the member is a row-major matrix with C columns and R rows, the matrix
    //      is stored identically to an array of R row vectors with C components each,
    //      according to rule (4).
    //
    //   8. If the member is an array of S row-major matrices with C columns and R
    //      rows, the matrix is stored identically to a row of S X R row vectors with C
    //      components each, according to rule (4).
    //
    //   9. If the member is a structure, the base alignment of the structure is N, where
    //      N is the largest base alignment value of any of its members, and rounded
    //      up to the base alignment of a vec4. The individual members of this substructure
    //      are then assigned offsets by applying this set of rules recursively,
    //      where the base offset of the first member of the sub-structure is equal to the
    //      aligned offset of the structure. The structure may have padding at the end;
    //      the base offset of the member following the sub-structure is rounded up to
    //      the next multiple of the base alignment of the structure.
    //
    //   10. If the member is an array of S structures, the S elements of the array are laid
    //       out in order, according to rule (9).
    //
    //   Assuming, for rule 10:  The stride is the same as the size of an element.

    stride = 0;
    // receives the stride of nested recursive calls, which is not needed at this level
    int dummyStride;

    // rules 4, 6, 8, and 10
    if (type.isArray()) {
        // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType derefType(type, 0);
        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        // element size padded to the array's alignment becomes the stride
        RoundToPow2(size, alignment);
        stride = size;  // uses full matrix size for stride of an array of matrices (not quite what rule 6/8, but what's expected)
                        // uses the assumption for rule 10 in the comment above
        // use one element to represent the last member of SSBO which is unsized array
        int arraySize = (type.isUnsizedArray() && (type.getOuterArraySize() == 0)) ? 1 : type.getOuterArraySize();
        size = stride * arraySize;
        return alignment;
    }

    // rule 9
    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        // std140 starts the struct's alignment at vec4; std430 takes the max of the members only
        int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
                                                   (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            // align the running offset to this member, then consume the member's size
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        // The structure may have padding at the end; the base offset of
        // the member following the sub-structure is rounded up to the next
        // multiple of the base alignment of the structure.
        RoundToPow2(size, maxAlignment);

        return maxAlignment;
    }

    // rule 1
    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    // rules 2 and 3
    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);
        switch (type.getVectorSize()) {
        case 1: // HLSL has this, GLSL does not
            return scalarAlign;
        case 2:
            size *= 2;
            return 2 * scalarAlign;
        default:
            // vec3 and vec4 both align to 4N (rules 2 and 3)
            size *= type.getVectorSize();
            return 4 * scalarAlign;
        }
    }

    // rules 5 and 7
    if (type.isMatrix()) {
        // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
        TType derefType(type, 0, rowMajor);

        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // use intra-matrix stride for stride of a just a matrix
        // total size is the stride times the number of (row or column) vectors
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = baseAlignmentVec4Std140;
    return baseAlignmentVec4Std140;
}
1672 
1673 // To aid the basic HLSL rule about crossing vec4 boundaries.
improperStraddle(const TType & type,int size,int offset)1674 bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
1675 {
1676     if (! type.isVector() || type.isArray())
1677         return false;
1678 
1679     return size <= 16 ? offset / 16 != (offset + size - 1) / 16
1680                       : offset % 16 != 0;
1681 }
1682 
// Implement the scalar block layout (GL_EXT_scalar_block_layout) rules:
// alignment is always the scalar alignment of the component type, with no
// vec4 or element-size rounding.  Operates recursively, mirroring
// getBaseAlignment's structure.
//
// The size is returned in 'size'; 'stride' is non-0 only for arrays and
// matrices.  Return value is the alignment of the type.
int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
{
    int alignment;

    stride = 0;
    // receives the stride of nested recursive calls, which is not needed at this level
    int dummyStride;

    if (type.isArray()) {
        TType derefType(type, 0);
        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;
        RoundToPow2(stride, alignment);

        // total size: all elements at 'stride' apart, but the last element is not padded out
        // NOTE(review): for an unsized array getOuterArraySize() could be 0 here, unlike
        // getBaseAlignment which substitutes 1 — presumably only sized arrays reach this
        // path; confirm against callers.
        size = stride * (type.getOuterArraySize() - 1) + size;
        return alignment;
    }

    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        // struct alignment is simply the max of its members; no vec4 rounding
        int maxAlignment = 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
                                                     (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            // align the running offset to this member, then consume the member's size
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        return maxAlignment;
    }

    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    if (type.isVector()) {
        // a vector aligns like its component scalar; its size is the packed components
        int scalarAlign = getBaseAlignmentScalar(type, size);

        size *= type.getVectorSize();
        return scalarAlign;
    }

    if (type.isMatrix()) {
        // deref to a row vector if row-major, else a column vector
        TType derefType(type, 0, rowMajor);

        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;  // use intra-matrix stride for stride of a just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = 1;
    return 1;
}
1748 
getMemberAlignment(const TType & type,int & size,int & stride,TLayoutPacking layoutPacking,bool rowMajor)1749 int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
1750 {
1751     if (layoutPacking == glslang::ElpScalar) {
1752         return getScalarAlignment(type, size, stride, rowMajor);
1753     } else {
1754         return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
1755     }
1756 }
1757 
1758 // shared calculation by getOffset and getOffsets
updateOffset(const TType & parentType,const TType & memberType,int & offset,int & memberSize)1759 void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
1760 {
1761     int dummyStride;
1762 
1763     // modify just the children's view of matrix layout, if there is one for this member
1764     TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
1765     int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
1766                                              parentType.getQualifier().layoutPacking,
1767                                              subMatrixLayout != ElmNone
1768                                                  ? subMatrixLayout == ElmRowMajor
1769                                                  : parentType.getQualifier().layoutMatrix == ElmRowMajor);
1770     RoundToPow2(offset, memberAlignment);
1771 }
1772 
1773 // Lookup or calculate the offset of a block member, using the recursively
1774 // defined block offset rules.
getOffset(const TType & type,int index)1775 int TIntermediate::getOffset(const TType& type, int index)
1776 {
1777     const TTypeList& memberList = *type.getStruct();
1778 
1779     // Don't calculate offset if one is present, it could be user supplied
1780     // and different than what would be calculated.  That is, this is faster,
1781     // but not just an optimization.
1782     if (memberList[index].type->getQualifier().hasOffset())
1783         return memberList[index].type->getQualifier().layoutOffset;
1784 
1785     int memberSize = 0;
1786     int offset = 0;
1787     for (int m = 0; m <= index; ++m) {
1788         updateOffset(type, *memberList[m].type, offset, memberSize);
1789 
1790         if (m < index)
1791             offset += memberSize;
1792     }
1793 
1794     return offset;
1795 }
1796 
1797 // Calculate the block data size.
1798 // Block arrayness is not taken into account, each element is backed by a separate buffer.
getBlockSize(const TType & blockType)1799 int TIntermediate::getBlockSize(const TType& blockType)
1800 {
1801     const TTypeList& memberList = *blockType.getStruct();
1802     int lastIndex = (int)memberList.size() - 1;
1803     int lastOffset = getOffset(blockType, lastIndex);
1804 
1805     int lastMemberSize;
1806     int dummyStride;
1807     getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
1808                        blockType.getQualifier().layoutPacking,
1809                        blockType.getQualifier().layoutMatrix == ElmRowMajor);
1810 
1811     return lastOffset + lastMemberSize;
1812 }
1813 
computeBufferReferenceTypeSize(const TType & type)1814 int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
1815 {
1816     assert(type.isReference());
1817     int size = getBlockSize(*type.getReferentType());
1818 
1819     int align = type.getBufferReferenceAlignment();
1820 
1821     if (align) {
1822         size = (size + align - 1) & ~(align-1);
1823     }
1824 
1825     return size;
1826 }
1827 
1828 } // end namespace glslang
1829