1 //
2 // Copyright (C) 2013-2016 LunarG, Inc.
3 //
4 // All rights reserved.
5 //
6 // Redistribution and use in source and binary forms, with or without
7 // modification, are permitted provided that the following conditions
8 // are met:
9 //
10 // Redistributions of source code must retain the above copyright
11 // notice, this list of conditions and the following disclaimer.
12 //
13 // Redistributions in binary form must reproduce the above
14 // copyright notice, this list of conditions and the following
15 // disclaimer in the documentation and/or other materials provided
16 // with the distribution.
17 //
18 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
19 // contributors may be used to endorse or promote products derived
20 // from this software without specific prior written permission.
21 //
22 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 // POSSIBILITY OF SUCH DAMAGE.
34 //
35
36 #if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
37
38 #include "../Include/Common.h"
39 #include "reflection.h"
40 #include "LiveTraverser.h"
41 #include "localintermediate.h"
42
43 #include "gl_types.h"
44
45 //
46 // Grow the reflection database through a friend traverser class of TReflection and a
47 // collection of functions that do a liveness traversal, noting which uniforms are used
48 // in semantically non-dead code.
49 //
50 // Can be used multiple times, once per stage, to grow a program reflection.
51 //
52 // High-level algorithm for one stage:
53 //
54 // 1. Put the entry point on the list of live functions.
55 //
56 // 2. Traverse any live function, while skipping if-tests with a compile-time constant
57 // condition of false, and while adding any encountered function calls to the live
58 // function list.
59 //
60 // Repeat until the live function list is empty.
61 //
62 // 3. Add any encountered uniform variables and blocks to the reflection database.
63 //
64 // Can be attempted even if linking failed, but will return false if recursion was detected
65 // or there wasn't exactly one entry point.
66 //
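//
// A minimal usage sketch (illustrative only; it assumes the TReflection
// constructor declared in reflection.h takes the reflection options and the
// first/last stages, and that 'vertexIntermediate'/'fragmentIntermediate' are
// the linked per-stage TIntermediate objects):
//
//     TReflection reflection(EShReflectionDefault, EShLangVertex, EShLangFragment);
//     if (reflection.addStage(EShLangVertex, *vertexIntermediate) &&
//         reflection.addStage(EShLangFragment, *fragmentIntermediate))
//         reflection.dump();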
67
68 namespace glslang {
69
70 //
71 // The traverser: mostly pass through, except
72 // - processing binary nodes to see if they are dereferences of aggregates to track
73 // - processing symbol nodes to see if they are non-aggregate objects to track
74 //
75 // This ignores semantically dead code by using TLiveTraverser.
76 //
77 // This is in the glslang namespace directly so it can be a friend of TReflection.
78 //
79
80 class TReflectionTraverser : public TIntermTraverser {
81 public:
82 TReflectionTraverser(const TIntermediate& i, TReflection& r) :
83 TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }
84
85 virtual bool visitBinary(TVisit, TIntermBinary* node);
86 virtual void visitSymbol(TIntermSymbol* base);
87
88 // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
89 // However, no dereference doesn't mean simple... it could be a complex aggregate.
90 void addUniform(const TIntermSymbol& base)
91 {
92 if (processedDerefs.find(&base) == processedDerefs.end()) {
93 processedDerefs.insert(&base);
94
95 int blockIndex = -1;
96 int offset = -1;
97 TList<TIntermBinary*> derefs;
98 TString baseName = base.getName();
99
100 if (base.getType().getBasicType() == EbtBlock) {
101 offset = 0;
102 bool anonymous = IsAnonymous(baseName);
103 const TString& blockName = base.getType().getTypeName();
104
105 if (!anonymous)
106 baseName = blockName;
107 else
108 baseName = "";
109
110 blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
111 }
112
113 // Use a degenerate (empty) set of dereferences to immediately put us at the end of
114 // the dereference chain expected by blowUpActiveAggregate.
115 blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0,
116 base.getQualifier().storage, updateStageMasks);
117 }
118 }
119
120 void addPipeIOVariable(const TIntermSymbol& base)
121 {
122 if (processedDerefs.find(&base) == processedDerefs.end()) {
123 processedDerefs.insert(&base);
124
125 const TString &name = base.getName();
126 const TType &type = base.getType();
127 const bool input = base.getQualifier().isPipeInput();
128
129 TReflection::TMapIndexToReflection &ioItems =
130 input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
131
132
133 TReflection::TNameToIndex &ioMapper =
134 input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;
135
136 if (reflection.options & EShReflectionUnwrapIOBlocks) {
137 bool anonymous = IsAnonymous(name);
138
139 TString baseName;
140 if (type.getBasicType() == EbtBlock) {
141 baseName = anonymous ? TString() : type.getTypeName();
142 } else {
143 baseName = anonymous ? TString() : name;
144 }
145
146 // by convention if this is an arrayed block we ignore the array in the reflection
147 if (type.isArray() && type.getBasicType() == EbtBlock) {
148 blowUpIOAggregate(input, baseName, TType(type, 0));
149 } else {
150 blowUpIOAggregate(input, baseName, type);
151 }
152 } else {
153 TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
154 if (it == ioMapper.end()) {
155 // separate pipe I/O params from uniforms and blocks;
156 // 'in' is only reflected for the first stage and 'out' only for the last stage; check the traversal in the call stack.
157 ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
158 ioItems.push_back(
159 TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
160 EShLanguageMask& stages = ioItems.back().stages;
161 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
162 } else {
163 EShLanguageMask& stages = ioItems[it->second].stages;
164 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
165 }
166 }
167 }
168 }
169
170 // Lookup or calculate the offset of all block members at once, using the recursively
171 // defined block offset rules.
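// For example (a sketch of the std140 case): a std140 block containing
// { float f; vec4 v; } yields offsets {0, 16}, because vec4 members are
// 16-byte aligned under std140 rules.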
172 void getOffsets(const TType& type, TVector<int>& offsets)
173 {
174 const TTypeList& memberList = *type.getStruct();
175 int memberSize = 0;
176 int offset = 0;
177
178 for (size_t m = 0; m < offsets.size(); ++m) {
179 // if the user supplied an offset, snap to it now
180 if (memberList[m].type->getQualifier().hasOffset())
181 offset = memberList[m].type->getQualifier().layoutOffset;
182
183 // calculate the offset of the next member and align the current offset to this member
184 intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
185
186 // save the offset of this member
187 offsets[m] = offset;
188
189 // update for the next member
190 offset += memberSize;
191 }
192 }
193
194 // Calculate the stride of an array type
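// For example, a float[4] member of a std140 block has an array stride of 16
// (elements padded to vec4 alignment), while a block type itself reports a
// stride of 0 so member offsets stay relative to the start of the block.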
195 int getArrayStride(const TType& baseType, const TType& type)
196 {
197 int dummySize;
198 int stride;
199
200 // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
201 if (type.getBasicType() == EbtBlock)
202 return 0;
203
204 TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
205 intermediate.getMemberAlignment(type, dummySize, stride,
206 baseType.getQualifier().layoutPacking,
207 subMatrixLayout != ElmNone
208 ? subMatrixLayout == ElmRowMajor
209 : baseType.getQualifier().layoutMatrix == ElmRowMajor);
210
211 return stride;
212 }
213
214 // count the total number of leaf members reached by fully expanding a block/struct type
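// For example, a block { vec4 a; S s[2]; } where struct S has three scalar
// members counts 1 + 2*3 = 7 leaves; with strict array suffixes in a buffer
// block the top-level array is not multiplied out, giving 1 + 3 = 4.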
215 int countAggregateMembers(const TType& parentType)
216 {
217 if (! parentType.isStruct())
218 return 1;
219
220 const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
221
222 bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
223
224 const TTypeList &memberList = *parentType.getStruct();
225
226 int ret = 0;
227
228 for (size_t i = 0; i < memberList.size(); i++)
229 {
230 const TType &memberType = *memberList[i].type;
231 int numMembers = countAggregateMembers(memberType);
232 // for sized arrays of structs, expand the member count the same way
233 // blowUpActiveAggregate does below
234 if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
235 if (! strictArraySuffix || ! blockParent)
236 numMembers *= memberType.getArraySizes()->getCumulativeSize();
237 }
238 ret += numMembers;
239 }
240
241 return ret;
242 }
243
244 // Traverse the provided deref chain, including the base, and
245 // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
246 // - recursively expand any variable array index in the middle of that traversal
247 // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
248 //
249 // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
250 // A value of 0 for arraySize will mean to use the full array's size.
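// For example (illustrative only), a block member declared as
//     S s[2];   // where struct S holds a single float f
// is expanded below into the entries "...s[0].f" and "...s[1].f", with offsets
// advanced by the array stride; under strict array suffix rules a top-level
// array inside a buffer block only gets its "[0]" element expanded.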
251 void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
252 TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
253 int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
254 {
255 // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
256 // Broadly:
257 // * arrays-of-structs always have a [x] suffix.
258 // * with array-of-struct variables in the root of a buffer block, only ever return [0].
259 // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
260 const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
261
262 // is this variable inside a buffer block? The flag is set back to false after we iterate inside the first array element.
263 bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
264
265 // process the part of the dereference chain that was explicit in the shader
266 TString name = baseName;
267 const TType* terminalType = &baseType;
268 for (; deref != derefs.end(); ++deref) {
269 TIntermBinary* visitNode = *deref;
270 terminalType = &visitNode->getType();
271 int index;
272 switch (visitNode->getOp()) {
273 case EOpIndexIndirect: {
274 int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
275
276 if (topLevelArrayStride == 0)
277 topLevelArrayStride = stride;
278
279 // Visit all the indices of this array, and for each one add on the remaining dereferencing
280 for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
281 TString newBaseName = name;
282 if (terminalType->getBasicType() == EbtBlock) {}
283 else if (strictArraySuffix && blockParent)
284 newBaseName.append(TString("[0]"));
285 else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
286 newBaseName.append(TString("[") + String(i) + "]");
287 TList<TIntermBinary*>::const_iterator nextDeref = deref;
288 ++nextDeref;
289 blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
290 topLevelArraySize, topLevelArrayStride, baseStorage, active);
291
292 if (offset >= 0)
293 offset += stride;
294 }
295
296 // it was all completed in the recursive calls above
297 return;
298 }
299 case EOpIndexDirect: {
300 int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
301
302 index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
303 if (terminalType->getBasicType() == EbtBlock) {}
304 else if (strictArraySuffix && blockParent)
305 name.append(TString("[0]"));
306 else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
307 name.append(TString("[") + String(index) + "]");
308
309 if (offset >= 0)
310 offset += stride * index;
311 }
312
313 if (topLevelArrayStride == 0)
314 topLevelArrayStride = stride;
315
316 // expand top-level arrays in blocks with [0] suffix
317 if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) {
318 blockParent = false;
319 }
320 break;
321 }
322 case EOpIndexDirectStruct:
323 index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
324 if (offset >= 0)
325 offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
326 if (name.size() > 0)
327 name.append(".");
328 name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
329
330 // expand non top-level arrays with [x] suffix
331 if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray())
332 {
333 blockParent = false;
334 }
335 break;
336 default:
337 break;
338 }
339 }
340
341 // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
342 if (! isReflectionGranularity(*terminalType)) {
343 // the base offset of this node, that children are relative to
344 int baseOffset = offset;
345
346 if (terminalType->isArray()) {
347 // Visit all the indices of this array, and for each one,
348 // fully explode the remaining aggregate to dereference
349
350 int stride = 0;
351 if (offset >= 0)
352 stride = getArrayStride(baseType, *terminalType);
353
354 int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
355
356 // for top-level arrays in blocks, only expand [0] to avoid explosion of items
357 if ((strictArraySuffix && blockParent) ||
358 ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) {
359 arrayIterateSize = 1;
360 }
361
362 if (topLevelArrayStride == 0)
363 topLevelArrayStride = stride;
364
365 for (int i = 0; i < arrayIterateSize; ++i) {
366 TString newBaseName = name;
367 if (terminalType->getBasicType() != EbtBlock)
368 newBaseName.append(TString("[") + String(i) + "]");
369 TType derefType(*terminalType, 0);
370 if (offset >= 0)
371 offset = baseOffset + stride * i;
372
373 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
374 topLevelArraySize, topLevelArrayStride, baseStorage, active);
375 }
376 } else {
377 // Visit all members of this aggregate, and for each one,
378 // fully explode the remaining aggregate to dereference
379 const TTypeList& typeList = *terminalType->getStruct();
380
381 TVector<int> memberOffsets;
382
383 if (baseOffset >= 0) {
384 memberOffsets.resize(typeList.size());
385 getOffsets(*terminalType, memberOffsets);
386 }
387
388 for (int i = 0; i < (int)typeList.size(); ++i) {
389 TString newBaseName = name;
390 if (newBaseName.size() > 0)
391 newBaseName.append(".");
392 newBaseName.append(typeList[i].type->getFieldName());
393 TType derefType(*terminalType, i);
394 if (offset >= 0)
395 offset = baseOffset + memberOffsets[i];
396
397 int arrayStride = topLevelArrayStride;
398 if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
399 derefType.isArray()) {
400 arrayStride = getArrayStride(baseType, derefType);
401 }
402
403 if (topLevelArraySize == -1 && arrayStride == 0 && blockParent)
404 topLevelArraySize = 1;
405
406 if (strictArraySuffix && blockParent) {
407 // if this member is an array, store the top-level array stride but start the explosion from
408 // the inner struct type.
409 if (derefType.isArray() && derefType.isStruct()) {
410 newBaseName.append("[0]");
411 auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
412 blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i],
413 blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false);
414 }
415 else if (derefType.isArray()) {
416 auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
417 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
418 0, dimSize, 0, terminalType->getQualifier().storage, false);
419 }
420 else {
421 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
422 0, 1, 0, terminalType->getQualifier().storage, false);
423 }
424 } else {
425 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
426 topLevelArraySize, arrayStride, baseStorage, active);
427 }
428 }
429 }
430
431 // it was all completed in the recursive calls above
432 return;
433 }
434
435 if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
436 name.append(TString("[0]"));
437 }
438
439 // Finally, add a full string to the reflection database, and update the array size if necessary.
440 // If the dereferenced entity to record is an array, compute the size and update the maximum size.
441
442 // there might not be a final array dereference, it could have been copied as an array object
443 if (arraySize == 0)
444 arraySize = mapToGlArraySize(*terminalType);
445
446 TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
447
448 TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
449 if (it == reflection.nameToIndex.end()) {
450 int uniformIndex = (int)variables.size();
451 reflection.nameToIndex[name.c_str()] = uniformIndex;
452 variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
453 arraySize, blockIndex));
454 if (terminalType->isArray()) {
455 variables.back().arrayStride = getArrayStride(baseType, *terminalType);
456 if (topLevelArrayStride == 0)
457 topLevelArrayStride = variables.back().arrayStride;
458 }
459
460 if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
461 reflection.atomicCounterUniformIndices.push_back(uniformIndex);
462
463 variables.back().topLevelArraySize = topLevelArraySize;
464 variables.back().topLevelArrayStride = topLevelArrayStride;
465
466 if ((reflection.options & EShReflectionAllBlockVariables) && active) {
467 EShLanguageMask& stages = variables.back().stages;
468 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
469 }
470 } else {
471 if (arraySize > 1) {
472 int& reflectedArraySize = variables[it->second].size;
473 reflectedArraySize = std::max(arraySize, reflectedArraySize);
474 }
475
476 if ((reflection.options & EShReflectionAllBlockVariables) && active) {
477 EShLanguageMask& stages = variables[it->second].stages;
478 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
479 }
480 }
481 }
482
483 // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
484 void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
485 {
486 TString name = baseName;
487
488 // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
489 if (! isReflectionGranularity(type)) {
490 if (type.isArray()) {
491 // Visit all the indices of this array, and for each one,
492 // fully explode the remaining aggregate to dereference
493 for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
494 TString newBaseName = name;
495 newBaseName.append(TString("[") + String(i) + "]");
496 TType derefType(type, 0);
497
498 blowUpIOAggregate(input, newBaseName, derefType);
499 }
500 } else {
501 // Visit all members of this aggregate, and for each one,
502 // fully explode the remaining aggregate to dereference
503 const TTypeList& typeList = *type.getStruct();
504
505 for (int i = 0; i < (int)typeList.size(); ++i) {
506 TString newBaseName = name;
507 if (newBaseName.size() > 0)
508 newBaseName.append(".");
509 newBaseName.append(typeList[i].type->getFieldName());
510 TType derefType(type, i);
511
512 blowUpIOAggregate(input, newBaseName, derefType);
513 }
514 }
515
516 // it was all completed in the recursive calls above
517 return;
518 }
519
520 if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
521 name.append(TString("[0]"));
522 }
523
524 TReflection::TMapIndexToReflection &ioItems =
525 input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
526
527 std::string namespacedName = input ? "in " : "out ";
528 namespacedName += name.c_str();
529
530 TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
531 if (it == reflection.nameToIndex.end()) {
532 reflection.nameToIndex[namespacedName] = (int)ioItems.size();
533 ioItems.push_back(
534 TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
535
536 EShLanguageMask& stages = ioItems.back().stages;
537 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
538 } else {
539 EShLanguageMask& stages = ioItems[it->second].stages;
540 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
541 }
542 }
543
544 // Add a uniform dereference where blocks/struct/arrays are involved in the access.
545 // Handles the situation where the left node is at the correct or too coarse a
546 // granularity for reflection. (That is, further dereferences up the tree will be
547 // skipped.) Earlier dereferences, down the tree, will be handled
548 // at the same time, and logged to prevent reprocessing as the tree is traversed.
549 //
550 // Note: Other things like the following must be caught elsewhere:
551 // - a simple non-array, non-struct variable (no dereference even conceivable)
552 // - an aggregate consumed en masse, without a dereference
553 //
554 // So, this code is for cases like
555 // - a struct/block dereferencing a member (whether the member is array or not)
556 // - an array of struct
557 // - structs/arrays containing the above
558 //
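// For example, an access such as u.s[i].v (with 'u' a uniform block) arrives
// here at its topmost dereference; the chain is walked back to the base symbol
// below, then re-expanded forward by blowUpActiveAggregate.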
559 void addDereferencedUniform(TIntermBinary* topNode)
560 {
561 // See if too fine-grained to process (wait to get further down the tree)
562 const TType& leftType = topNode->getLeft()->getType();
563 if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
564 return;
565
566 // We have an array or structure or block dereference, see if it's a uniform
567 // based dereference (if not, skip it).
568 TIntermSymbol* base = findBase(topNode);
569 if (! base || ! base->getQualifier().isUniformOrBuffer())
570 return;
571
572 // See if we've already processed this (e.g., in the middle of something
573 // we did earlier), and if so skip it
574 if (processedDerefs.find(topNode) != processedDerefs.end())
575 return;
576
577 // Process this uniform dereference
578
579 int offset = -1;
580 int blockIndex = -1;
581 bool anonymous = false;
582
583 // See if we need to record the block itself
584 bool block = base->getBasicType() == EbtBlock;
585 if (block) {
586 offset = 0;
587 anonymous = IsAnonymous(base->getName());
588
589 const TString& blockName = base->getType().getTypeName();
590 TString baseName;
591
592 if (! anonymous)
593 baseName = blockName;
594
595 blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
596
597 if (reflection.options & EShReflectionAllBlockVariables) {
598 // Use a degenerate (empty) set of dereferences to immediately put us at the end of
599 // the dereference chain expected by blowUpActiveAggregate.
600 TList<TIntermBinary*> derefs;
601
602 // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
603 // expanding root arrays anyway, just start the iteration from the base block type.
604 blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0,
605 base->getQualifier().storage, false);
606 }
607 }
608
609 // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
610 // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
611 TList<TIntermBinary*> derefs;
612 for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
613 if (isReflectionGranularity(visitNode->getLeft()->getType()))
614 continue;
615
616 derefs.push_front(visitNode);
617 processedDerefs.insert(visitNode);
618 }
619 processedDerefs.insert(base);
620
621 // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
622 int arraySize = 0;
623 if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
624 if (topNode->getOp() == EOpIndexDirect)
625 arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
626 }
627
628 // Put the dereference chain together, forward
629 TString baseName;
630 if (! anonymous) {
631 if (block)
632 baseName = base->getType().getTypeName();
633 else
634 baseName = base->getName();
635 }
636 blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0,
637 base->getQualifier().storage, true);
638 }
639
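// Add a block to the reflection database, one entry per element for arrayed
// blocks (named "block[0]", "block[1]", ...), and return the index of the
// (first) entry. Re-adding an existing name just ORs in the current stage.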
640 int addBlockName(const TString& name, const TType& type, int size)
641 {
642 int blockIndex = 0;
643 if (type.isArray()) {
644 TType derefType(type, 0);
645 for (int e = 0; e < type.getOuterArraySize(); ++e) {
646 int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size);
647 if (e == 0)
648 blockIndex = memberBlockIndex;
649 }
650 } else {
651 TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
652
653 TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
654 if (it == reflection.nameToIndex.end()) {
655 blockIndex = (int)blocks.size();
656 reflection.nameToIndex[name.c_str()] = blockIndex;
657 blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex));
658
659 blocks.back().numMembers = countAggregateMembers(type);
660
661 EShLanguageMask& stages = blocks.back().stages;
662 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
663 }
664 else {
665 blockIndex = it->second;
666
667 EShLanguageMask& stages = blocks[blockIndex].stages;
668 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
669 }
670 }
671
672 return blockIndex;
673 }
674
675 // Are we at a level in a dereference chain at which individual active uniform queries are made?
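// That is, scalars, vectors, matrices, samplers, and single-level arrays of
// them; blocks, structs, and arrays of arrays still need further expansion.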
676 bool isReflectionGranularity(const TType& type)
677 {
678 return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
679 }
680
681 // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
682 // Return nullptr if the topology does not fit this situation.
683 TIntermSymbol* findBase(const TIntermBinary* node)
684 {
685 TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
686 if (base)
687 return base;
688 TIntermBinary* left = node->getLeft()->getAsBinaryNode();
689 if (! left)
690 return nullptr;
691
692 return findBase(left);
693 }
694
695 //
696 // Translate a glslang sampler type into the GL API #define number.
697 //
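// For example, a plain sampler2D maps to GL_SAMPLER_2D and its arrayed shadow
// variant to GL_SAMPLER_2D_ARRAY_SHADOW; image types take the GL_IMAGE_* /
// GL_*_IMAGE_* branch below. Unknown combinations map to 0.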
698 int mapSamplerToGlType(TSampler sampler)
699 {
700 if (! sampler.image) {
701 // a sampler...
702 switch (sampler.type) {
703 case EbtFloat:
704 switch ((int)sampler.dim) {
705 case Esd1D:
706 switch ((int)sampler.shadow) {
707 case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
708 case true: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
709 }
710 case Esd2D:
711 switch ((int)sampler.ms) {
712 case false:
713 switch ((int)sampler.shadow) {
714 case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
715 case true: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
716 }
717 case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
718 }
719 case Esd3D:
720 return GL_SAMPLER_3D;
721 case EsdCube:
722 switch ((int)sampler.shadow) {
723 case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
724 case true: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
725 }
726 case EsdRect:
727 return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
728 case EsdBuffer:
729 return GL_SAMPLER_BUFFER;
730 }
731 case EbtFloat16:
732 switch ((int)sampler.dim) {
733 case Esd1D:
734 switch ((int)sampler.shadow) {
735 case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
736 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
737 }
738 case Esd2D:
739 switch ((int)sampler.ms) {
740 case false:
741 switch ((int)sampler.shadow) {
742 case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
743 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
744 }
745 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
746 }
747 case Esd3D:
748 return GL_FLOAT16_SAMPLER_3D_AMD;
749 case EsdCube:
750 switch ((int)sampler.shadow) {
751 case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
752 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
753 }
754 case EsdRect:
755 return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
756 case EsdBuffer:
757 return GL_FLOAT16_SAMPLER_BUFFER_AMD;
758 }
759 case EbtInt:
760 switch ((int)sampler.dim) {
761 case Esd1D:
762 return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
763 case Esd2D:
764 switch ((int)sampler.ms) {
765 case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
766 case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
767 : GL_INT_SAMPLER_2D_MULTISAMPLE;
768 }
769 case Esd3D:
770 return GL_INT_SAMPLER_3D;
771 case EsdCube:
772 return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
773 case EsdRect:
774 return GL_INT_SAMPLER_2D_RECT;
775 case EsdBuffer:
776 return GL_INT_SAMPLER_BUFFER;
777 }
778 case EbtUint:
779 switch ((int)sampler.dim) {
780 case Esd1D:
781 return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
782 case Esd2D:
783 switch ((int)sampler.ms) {
784 case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
785 case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
786 : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
787 }
788 case Esd3D:
789 return GL_UNSIGNED_INT_SAMPLER_3D;
790 case EsdCube:
791 return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
792 case EsdRect:
793 return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
794 case EsdBuffer:
795 return GL_UNSIGNED_INT_SAMPLER_BUFFER;
796 }
797 default:
798 return 0;
799 }
800 } else {
801 // an image...
802 switch (sampler.type) {
803 case EbtFloat:
804 switch ((int)sampler.dim) {
805 case Esd1D:
806 return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
807 case Esd2D:
808 switch ((int)sampler.ms) {
809 case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
810 case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
811 }
812 case Esd3D:
813 return GL_IMAGE_3D;
814 case EsdCube:
815 return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
816 case EsdRect:
817 return GL_IMAGE_2D_RECT;
818 case EsdBuffer:
819 return GL_IMAGE_BUFFER;
820 }
821 case EbtFloat16:
822 switch ((int)sampler.dim) {
823 case Esd1D:
824 return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
825 case Esd2D:
826 switch ((int)sampler.ms) {
827 case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
828 case true: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
829 }
830 case Esd3D:
831 return GL_FLOAT16_IMAGE_3D_AMD;
832 case EsdCube:
833 return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
834 case EsdRect:
835 return GL_FLOAT16_IMAGE_2D_RECT_AMD;
836 case EsdBuffer:
837 return GL_FLOAT16_IMAGE_BUFFER_AMD;
838 }
839 case EbtInt:
840 switch ((int)sampler.dim) {
841 case Esd1D:
842 return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
843 case Esd2D:
844 switch ((int)sampler.ms) {
845 case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
846 case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
847 }
848 case Esd3D:
849 return GL_INT_IMAGE_3D;
850 case EsdCube:
851 return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
852 case EsdRect:
853 return GL_INT_IMAGE_2D_RECT;
854 case EsdBuffer:
855 return GL_INT_IMAGE_BUFFER;
856 }
857 case EbtUint:
858 switch ((int)sampler.dim) {
859 case Esd1D:
860 return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
861 case Esd2D:
862 switch ((int)sampler.ms) {
863 case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
864 case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
865 : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
866 }
867 case Esd3D:
868 return GL_UNSIGNED_INT_IMAGE_3D;
869 case EsdCube:
870 return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
871 case EsdRect:
872 return GL_UNSIGNED_INT_IMAGE_2D_RECT;
873 case EsdBuffer:
874 return GL_UNSIGNED_INT_IMAGE_BUFFER;
875 }
876 default:
877 return 0;
878 }
879 }
880 }
881
882 //
883 // Translate a glslang type into the GL API #define number.
884 // Ignores arrayness.
885 //
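// For example, vec3 maps to GL_FLOAT_VEC3 (GL_FLOAT_VEC2 + 1), mat4x3 maps to
// GL_FLOAT_MAT4x3, and a lone float maps to GL_FLOAT; arrayness is reported
// separately through mapToGlArraySize.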
886 int mapToGlType(const TType& type)
887 {
888 switch (type.getBasicType()) {
889 case EbtSampler:
890 return mapSamplerToGlType(type.getSampler());
891 case EbtStruct:
892 case EbtBlock:
893 case EbtVoid:
894 return 0;
895 default:
896 break;
897 }
898
899 if (type.isVector()) {
900 int offset = type.getVectorSize() - 2;
901 switch (type.getBasicType()) {
902 case EbtFloat: return GL_FLOAT_VEC2 + offset;
903 case EbtDouble: return GL_DOUBLE_VEC2 + offset;
904 case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
905 case EbtInt: return GL_INT_VEC2 + offset;
906 case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
907 case EbtInt64: return GL_INT64_ARB + offset;
908 case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset;
909 case EbtBool: return GL_BOOL_VEC2 + offset;
910 case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
911 default: return 0;
912 }
913 }
914 if (type.isMatrix()) {
915 switch (type.getBasicType()) {
916 case EbtFloat:
917 switch (type.getMatrixCols()) {
918 case 2:
919 switch (type.getMatrixRows()) {
920 case 2: return GL_FLOAT_MAT2;
921 case 3: return GL_FLOAT_MAT2x3;
922 case 4: return GL_FLOAT_MAT2x4;
923 default: return 0;
924 }
925 case 3:
926 switch (type.getMatrixRows()) {
927 case 2: return GL_FLOAT_MAT3x2;
928 case 3: return GL_FLOAT_MAT3;
929 case 4: return GL_FLOAT_MAT3x4;
930 default: return 0;
931 }
932 case 4:
933 switch (type.getMatrixRows()) {
934 case 2: return GL_FLOAT_MAT4x2;
935 case 3: return GL_FLOAT_MAT4x3;
936 case 4: return GL_FLOAT_MAT4;
937 default: return 0;
938 }
939 }
940 case EbtDouble:
941 switch (type.getMatrixCols()) {
942 case 2:
943 switch (type.getMatrixRows()) {
944 case 2: return GL_DOUBLE_MAT2;
945 case 3: return GL_DOUBLE_MAT2x3;
946 case 4: return GL_DOUBLE_MAT2x4;
947 default: return 0;
948 }
949 case 3:
950 switch (type.getMatrixRows()) {
951 case 2: return GL_DOUBLE_MAT3x2;
952 case 3: return GL_DOUBLE_MAT3;
953 case 4: return GL_DOUBLE_MAT3x4;
954 default: return 0;
955 }
956 case 4:
957 switch (type.getMatrixRows()) {
958 case 2: return GL_DOUBLE_MAT4x2;
959 case 3: return GL_DOUBLE_MAT4x3;
960 case 4: return GL_DOUBLE_MAT4;
961 default: return 0;
962 }
963 }
964 case EbtFloat16:
965 switch (type.getMatrixCols()) {
966 case 2:
967 switch (type.getMatrixRows()) {
968 case 2: return GL_FLOAT16_MAT2_AMD;
969 case 3: return GL_FLOAT16_MAT2x3_AMD;
970 case 4: return GL_FLOAT16_MAT2x4_AMD;
971 default: return 0;
972 }
973 case 3:
974 switch (type.getMatrixRows()) {
975 case 2: return GL_FLOAT16_MAT3x2_AMD;
976 case 3: return GL_FLOAT16_MAT3_AMD;
977 case 4: return GL_FLOAT16_MAT3x4_AMD;
978 default: return 0;
979 }
980 case 4:
981 switch (type.getMatrixRows()) {
982 case 2: return GL_FLOAT16_MAT4x2_AMD;
983 case 3: return GL_FLOAT16_MAT4x3_AMD;
984 case 4: return GL_FLOAT16_MAT4_AMD;
985 default: return 0;
986 }
987 }
988 default:
989 return 0;
990 }
991 }
992 if (type.getVectorSize() == 1) {
993 switch (type.getBasicType()) {
994 case EbtFloat: return GL_FLOAT;
995 case EbtDouble: return GL_DOUBLE;
996 case EbtFloat16: return GL_FLOAT16_NV;
997 case EbtInt: return GL_INT;
998 case EbtUint: return GL_UNSIGNED_INT;
999 case EbtInt64: return GL_INT64_ARB;
1000 case EbtUint64: return GL_UNSIGNED_INT64_ARB;
1001 case EbtBool: return GL_BOOL;
1002 case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
1003 default: return 0;
1004 }
1005 }
1006
1007 return 0;
1008 }
1009
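// Outermost array size for arrayed types, otherwise 1; reflection reports at
// most one level of arrayness here.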
1010 int mapToGlArraySize(const TType& type)
1011 {
1012 return type.isArray() ? type.getOuterArraySize() : 1;
1013 }
1014
1015 const TIntermediate& intermediate;
1016 TReflection& reflection;
1017 std::set<const TIntermNode*> processedDerefs;
1018 bool updateStageMasks;
1019
1020 protected:
1021 TReflectionTraverser(TReflectionTraverser&);
1022 TReflectionTraverser& operator=(TReflectionTraverser&);
1023 };
1024
1025 //
1026 // Implement the traversal functions of interest.
1027 //
1028
1029 // To catch dereferenced aggregates that must be reflected.
1030 // This catches them at the highest level possible in the tree.
1031 bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
1032 {
1033 switch (node->getOp()) {
1034 case EOpIndexDirect:
1035 case EOpIndexIndirect:
1036 case EOpIndexDirectStruct:
1037 addDereferencedUniform(node);
1038 break;
1039 default:
1040 break;
1041 }
1042
1043 // still need to visit everything below, which could contain sub-expressions
1044 // containing different uniforms
1045 return true;
1046 }
1047
1048 // To reflect non-dereferenced objects.
1049 void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
1050 {
1051 if (base->getQualifier().storage == EvqUniform) {
1052 if (base->getBasicType() == EbtBlock) {
1053 if (reflection.options & EShReflectionSharedStd140UBO) {
1054 addUniform(*base);
1055 }
1056 } else {
1057 addUniform(*base);
1058 }
1059 }
1060
1061 // #TODO add std140/layout active rules for ssbo, same with ubo.
1062 // Storage buffer blocks will be collected and expanded in this part.
1063 if((reflection.options & EShReflectionSharedStd140SSBO) &&
1064 (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock &&
1065 (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared)))
1066 addUniform(*base);
1067
1068 if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
1069 (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
1070 addPipeIOVariable(*base);
1071 }
1072
1073 //
1074 // Implement TObjectReflection methods.
1075 //
1076
1077 TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
1078 int pSize, int pIndex)
1079 : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
1080 numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
1081 {
1082 }
1083
1084 int TObjectReflection::getBinding() const
1085 {
1086 if (type == nullptr || !type->getQualifier().hasBinding())
1087 return -1;
1088 return type->getQualifier().layoutBinding;
1089 }
1090
1091 void TObjectReflection::dump() const
1092 {
1093 printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
1094 index, getBinding(), stages);
1095
1096 if (counterIndex != -1)
1097 printf(", counter %d", counterIndex);
1098
1099 if (numMembers != -1)
1100 printf(", numMembers %d", numMembers);
1101
1102 if (arrayStride != 0)
1103 printf(", arrayStride %d", arrayStride);
1104
1105 if (topLevelArrayStride != 0)
1106 printf(", topLevelArrayStride %d", topLevelArrayStride);
1107
1108 printf("\n");
1109 }
1110
1111 //
1112 // Implement TReflection methods.
1113 //
1114
1115 // Track any required attribute reflection, such as compute shader numthreads.
1116 //
1117 void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
1118 {
1119 if (stage == EShLangCompute) {
1120 // Remember thread dimensions
1121 for (int dim=0; dim<3; ++dim)
1122 localSize[dim] = intermediate.getLocalSize(dim);
1123 }
1124 }
1125
1126 // build counter block index associations for buffers
1127 void TReflection::buildCounterIndices(const TIntermediate& intermediate)
1128 {
1129 #ifdef ENABLE_HLSL
1130 // search for ones that have counters
1131 for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
1132 const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
1133 const int index = getIndex(counterName);
1134
1135 if (index >= 0)
1136 indexToUniformBlock[i].counterIndex = index;
1137 }
1138 #endif
1139 }
1140
1141 // build the shader stage mask for all uniforms
1142 void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
1143 {
1144 if (options & EShReflectionAllBlockVariables)
1145 return;
1146
1147 for (int i = 0; i < int(indexToUniform.size()); ++i) {
1148 indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
1149 }
1150
1151 for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
1152 indexToBufferVariable[i].stages =
1153 static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
1154 }
1155 }
1156
1157 // Merge live symbols from 'intermediate' into the existing reflection database.
1158 //
1159 // Returns false if the input is too malformed to do this.
1160 bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
1161 {
1162 if (intermediate.getTreeRoot() == nullptr ||
1163 intermediate.getNumEntryPoints() != 1 ||
1164 intermediate.isRecursive())
1165 return false;
1166
1167 buildAttributeReflection(stage, intermediate);
1168
1169 TReflectionTraverser it(intermediate, *this);
1170
1171 for (auto& sequence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
1172 if (sequence->getAsAggregate() != nullptr) {
1173 if (sequence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
1174 it.updateStageMasks = false;
1175 TIntermAggregate* linkerObjects = sequence->getAsAggregate();
1176 for (auto& sequence : linkerObjects->getSequence()) {
1177 auto pNode = sequence->getAsSymbolNode();
1178 if (pNode != nullptr) {
1179 if ((pNode->getQualifier().storage == EvqUniform &&
1180 (options & EShReflectionSharedStd140UBO)) ||
1181 (pNode->getQualifier().storage == EvqBuffer &&
1182 (options & EShReflectionSharedStd140SSBO))) {
1183 // collect std140 and shared uniform blocks from the AST
1184 if ((pNode->getBasicType() == EbtBlock) &&
1185 ((pNode->getQualifier().layoutPacking == ElpStd140) ||
1186 (pNode->getQualifier().layoutPacking == ElpShared))) {
1187 pNode->traverse(&it);
1188 }
1189 }
1190 else if ((options & EShReflectionAllIOVariables) &&
1191 (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput()))
1192 {
1193 pNode->traverse(&it);
1194 }
1195 }
1196 }
1197 } else {
1198 // This traverser visits every function in the AST.
1199 // To reflect uncalled functions, set the link message EShMsgKeepUncalled.
1200 // When EShMsgKeepUncalled is set, all functions are kept in the AST, even uncalled ones,
1201 // so uniform variables used only by uncalled functions still appear in the reflection.
1202 //
1203 // To reflect only live nodes, use the default link messages or leave EShMsgKeepUncalled unset.
1204 // Without EShMsgKeepUncalled the linker does not keep uncalled functions in the AST,
1205 // so traversing all function nodes is equivalent to traversing only the live functions.
1206 it.updateStageMasks = true;
1207 sequence->getAsAggregate()->traverse(&it);
1208 }
1209 }
1210 }
1211 it.updateStageMasks = true;
1212
1213 buildCounterIndices(intermediate);
1214 buildUniformStageMask(intermediate);
1215
1216 return true;
1217 }
1218
1219 void TReflection::dump()
1220 {
1221 printf("Uniform reflection:\n");
1222 for (size_t i = 0; i < indexToUniform.size(); ++i)
1223 indexToUniform[i].dump();
1224 printf("\n");
1225
1226 printf("Uniform block reflection:\n");
1227 for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
1228 indexToUniformBlock[i].dump();
1229 printf("\n");
1230
1231 printf("Buffer variable reflection:\n");
1232 for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
1233 indexToBufferVariable[i].dump();
1234 printf("\n");
1235
1236 printf("Buffer block reflection:\n");
1237 for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
1238 indexToBufferBlock[i].dump();
1239 printf("\n");
1240
1241 printf("Pipeline input reflection:\n");
1242 for (size_t i = 0; i < indexToPipeInput.size(); ++i)
1243 indexToPipeInput[i].dump();
1244 printf("\n");
1245
1246 printf("Pipeline output reflection:\n");
1247 for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
1248 indexToPipeOutput[i].dump();
1249 printf("\n");
1250
1251 if (getLocalSize(0) > 1) {
1252 static const char* axis[] = { "X", "Y", "Z" };
1253
1254 for (int dim=0; dim<3; ++dim)
1255 if (getLocalSize(dim) > 1)
1256 printf("Local size %s: %u\n", axis[dim], getLocalSize(dim));
1257
1258 printf("\n");
1259 }
1260
1261 // printf("Live names\n");
1262 // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
1263 // printf("%s: %d\n", it->first.c_str(), it->second);
1264 // printf("\n");
1265 }
1266
1267 } // end namespace glslang
1268
1269 #endif // !GLSLANG_WEB && !GLSLANG_ANGLE
1270