//
// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
// Copyright (C) 2012-2016 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

//
// Definition of the in-memory high-level intermediate representation
// of shaders.  This is a tree that the parser creates.
//
// Nodes in the tree are defined as a hierarchy of classes derived from
// TIntermNode. Each is a node in a tree.  There is no preset branching factor;
// each node can have its own type of list of children.
//

#ifndef __INTERMEDIATE_H
#define __INTERMEDIATE_H

#if defined(_MSC_VER) && _MSC_VER >= 1900
    #pragma warning(disable : 4464) // relative include path contains '..'
    #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
#endif

#include "../Include/Common.h"
#include "../Include/Types.h"
#include "../Include/ConstantUnion.h"

namespace glslang {

class TIntermediate;

//
// Operators used by the high-level (parse tree) representation.
//
enum TOperator {
    EOpNull,            // if in a node, should only mean a node is still being built
    EOpSequence,        // denotes a list of statements, or parameters, etc.
    EOpLinkerObjects,   // for aggregate node of objects the linker may need, if not referenced by the rest of the AST
    EOpFunctionCall,
    EOpFunction,        // For function definition
    EOpParameters,      // an aggregate listing the parameters to a function

    //
    // Unary operators
    //

    EOpNegative,
    EOpLogicalNot,
    EOpVectorLogicalNot,
    EOpBitwiseNot,

    EOpPostIncrement,
    EOpPostDecrement,
    EOpPreIncrement,
    EOpPreDecrement,

    EOpCopyObject,

    // (u)int* -> bool
    EOpConvInt8ToBool,
    EOpConvUint8ToBool,
    EOpConvInt16ToBool,
    EOpConvUint16ToBool,
    EOpConvIntToBool,
    EOpConvUintToBool,
    EOpConvInt64ToBool,
    EOpConvUint64ToBool,

    // float* -> bool
    EOpConvFloat16ToBool,
    EOpConvFloatToBool,
    EOpConvDoubleToBool,

    // bool -> (u)int*
    EOpConvBoolToInt8,
    EOpConvBoolToUint8,
    EOpConvBoolToInt16,
    EOpConvBoolToUint16,
    EOpConvBoolToInt,
    EOpConvBoolToUint,
    EOpConvBoolToInt64,
    EOpConvBoolToUint64,

    // bool -> float*
    EOpConvBoolToFloat16,
    EOpConvBoolToFloat,
    EOpConvBoolToDouble,

    // int8_t -> (u)int*
    EOpConvInt8ToInt16,
    EOpConvInt8ToInt,
    EOpConvInt8ToInt64,
    EOpConvInt8ToUint8,
    EOpConvInt8ToUint16,
    EOpConvInt8ToUint,
    EOpConvInt8ToUint64,

    // uint8_t -> (u)int*
    EOpConvUint8ToInt8,
    EOpConvUint8ToInt16,
    EOpConvUint8ToInt,
    EOpConvUint8ToInt64,
    EOpConvUint8ToUint16,
    EOpConvUint8ToUint,
    EOpConvUint8ToUint64,

    // int8_t -> float*
    EOpConvInt8ToFloat16,
    EOpConvInt8ToFloat,
    EOpConvInt8ToDouble,

    // uint8_t -> float*
    EOpConvUint8ToFloat16,
    EOpConvUint8ToFloat,
    EOpConvUint8ToDouble,

    // int16_t -> (u)int*
    EOpConvInt16ToInt8,
    EOpConvInt16ToInt,
    EOpConvInt16ToInt64,
    EOpConvInt16ToUint8,
    EOpConvInt16ToUint16,
    EOpConvInt16ToUint,
    EOpConvInt16ToUint64,

    // uint16_t -> (u)int*
    EOpConvUint16ToInt8,
    EOpConvUint16ToInt16,
    EOpConvUint16ToInt,
    EOpConvUint16ToInt64,
    EOpConvUint16ToUint8,
    EOpConvUint16ToUint,
    EOpConvUint16ToUint64,

    // int16_t -> float*
    EOpConvInt16ToFloat16,
    EOpConvInt16ToFloat,
    EOpConvInt16ToDouble,

    // uint16_t -> float*
    EOpConvUint16ToFloat16,
    EOpConvUint16ToFloat,
    EOpConvUint16ToDouble,

    // int32_t -> (u)int*
    EOpConvIntToInt8,
    EOpConvIntToInt16,
    EOpConvIntToInt64,
    EOpConvIntToUint8,
    EOpConvIntToUint16,
    EOpConvIntToUint,
    EOpConvIntToUint64,

    // uint32_t -> (u)int*
    EOpConvUintToInt8,
    EOpConvUintToInt16,
    EOpConvUintToInt,
    EOpConvUintToInt64,
    EOpConvUintToUint8,
    EOpConvUintToUint16,
    EOpConvUintToUint64,

    // int32_t -> float*
    EOpConvIntToFloat16,
    EOpConvIntToFloat,
    EOpConvIntToDouble,

    // uint32_t -> float*
    EOpConvUintToFloat16,
    EOpConvUintToFloat,
    EOpConvUintToDouble,

    // int64_t -> (u)int*
    EOpConvInt64ToInt8,
    EOpConvInt64ToInt16,
    EOpConvInt64ToInt,
    EOpConvInt64ToUint8,
    EOpConvInt64ToUint16,
    EOpConvInt64ToUint,
    EOpConvInt64ToUint64,

    // uint64_t -> (u)int*
    EOpConvUint64ToInt8,
    EOpConvUint64ToInt16,
    EOpConvUint64ToInt,
    EOpConvUint64ToInt64,
    EOpConvUint64ToUint8,
    EOpConvUint64ToUint16,
    EOpConvUint64ToUint,

    // int64_t -> float*
    EOpConvInt64ToFloat16,
    EOpConvInt64ToFloat,
    EOpConvInt64ToDouble,

    // uint64_t -> float*
    EOpConvUint64ToFloat16,
    EOpConvUint64ToFloat,
    EOpConvUint64ToDouble,

    // float16_t -> (u)int*
    EOpConvFloat16ToInt8,
    EOpConvFloat16ToInt16,
    EOpConvFloat16ToInt,
    EOpConvFloat16ToInt64,
    EOpConvFloat16ToUint8,
    EOpConvFloat16ToUint16,
    EOpConvFloat16ToUint,
    EOpConvFloat16ToUint64,

    // float16_t -> float*
    EOpConvFloat16ToFloat,
    EOpConvFloat16ToDouble,

    // float -> (u)int*
    EOpConvFloatToInt8,
    EOpConvFloatToInt16,
    EOpConvFloatToInt,
    EOpConvFloatToInt64,
    EOpConvFloatToUint8,
    EOpConvFloatToUint16,
    EOpConvFloatToUint,
    EOpConvFloatToUint64,

    // float -> float*
    EOpConvFloatToFloat16,
    EOpConvFloatToDouble,

    // float64_t -> (u)int*
    EOpConvDoubleToInt8,
    EOpConvDoubleToInt16,
    EOpConvDoubleToInt,
    EOpConvDoubleToInt64,
    EOpConvDoubleToUint8,
    EOpConvDoubleToUint16,
    EOpConvDoubleToUint,
    EOpConvDoubleToUint64,

    // float64_t -> float*
    EOpConvDoubleToFloat16,
    EOpConvDoubleToFloat,

    // uint64_t <-> pointer
    EOpConvUint64ToPtr,
    EOpConvPtrToUint64,

    // uvec2 <-> pointer
    EOpConvUvec2ToPtr,
    EOpConvPtrToUvec2,

    // uint64_t -> accelerationStructureEXT
    EOpConvUint64ToAccStruct,

    // uvec2 -> accelerationStructureEXT
    EOpConvUvec2ToAccStruct,

    //
    // binary operations
    //

    EOpAdd,
    EOpSub,
    EOpMul,
    EOpDiv,
    EOpMod,
    EOpRightShift,
    EOpLeftShift,
    EOpAnd,
    EOpInclusiveOr,
    EOpExclusiveOr,
    EOpEqual,
    EOpNotEqual,
    EOpVectorEqual,
    EOpVectorNotEqual,
    EOpLessThan,
    EOpGreaterThan,
    EOpLessThanEqual,
    EOpGreaterThanEqual,
    EOpComma,

    EOpVectorTimesScalar,
    EOpVectorTimesMatrix,
    EOpMatrixTimesVector,
    EOpMatrixTimesScalar,

    EOpLogicalOr,
    EOpLogicalXor,
    EOpLogicalAnd,

    EOpIndexDirect,
    EOpIndexIndirect,
    EOpIndexDirectStruct,

    EOpVectorSwizzle,

    EOpMethod,
    EOpScoping,

    //
    // Built-in functions mapped to operators
    //

    EOpRadians,
    EOpDegrees,
    EOpSin,
    EOpCos,
    EOpTan,
    EOpAsin,
    EOpAcos,
    EOpAtan,
    EOpSinh,
    EOpCosh,
    EOpTanh,
    EOpAsinh,
    EOpAcosh,
    EOpAtanh,

    EOpPow,
    EOpExp,
    EOpLog,
    EOpExp2,
    EOpLog2,
    EOpSqrt,
    EOpInverseSqrt,

    EOpAbs,
    EOpSign,
    EOpFloor,
    EOpTrunc,
    EOpRound,
    EOpRoundEven,
    EOpCeil,
    EOpFract,
    EOpModf,
    EOpMin,
    EOpMax,
    EOpClamp,
    EOpMix,
    EOpStep,
    EOpSmoothStep,

    EOpIsNan,
    EOpIsInf,

    EOpFma,

    EOpFrexp,
    EOpLdexp,

    EOpFloatBitsToInt,
    EOpFloatBitsToUint,
    EOpIntBitsToFloat,
    EOpUintBitsToFloat,
    EOpDoubleBitsToInt64,
    EOpDoubleBitsToUint64,
    EOpInt64BitsToDouble,
    EOpUint64BitsToDouble,
    EOpFloat16BitsToInt16,
    EOpFloat16BitsToUint16,
    EOpInt16BitsToFloat16,
    EOpUint16BitsToFloat16,
    EOpPackSnorm2x16,
    EOpUnpackSnorm2x16,
    EOpPackUnorm2x16,
    EOpUnpackUnorm2x16,
    EOpPackSnorm4x8,
    EOpUnpackSnorm4x8,
    EOpPackUnorm4x8,
    EOpUnpackUnorm4x8,
    EOpPackHalf2x16,
    EOpUnpackHalf2x16,
    EOpPackDouble2x32,
    EOpUnpackDouble2x32,
    EOpPackInt2x32,
    EOpUnpackInt2x32,
    EOpPackUint2x32,
    EOpUnpackUint2x32,
    EOpPackFloat2x16,
    EOpUnpackFloat2x16,
    EOpPackInt2x16,
    EOpUnpackInt2x16,
    EOpPackUint2x16,
    EOpUnpackUint2x16,
    EOpPackInt4x16,
    EOpUnpackInt4x16,
    EOpPackUint4x16,
    EOpUnpackUint4x16,
    EOpPack16,
    EOpPack32,
    EOpPack64,
    EOpUnpack32,
    EOpUnpack16,
    EOpUnpack8,

    EOpLength,
    EOpDistance,
    EOpDot,
    EOpCross,
    EOpNormalize,
    EOpFaceForward,
    EOpReflect,
    EOpRefract,

    EOpMin3,
    EOpMax3,
    EOpMid3,

    EOpDPdx,            // Fragment only
    EOpDPdy,            // Fragment only
    EOpFwidth,          // Fragment only
    EOpDPdxFine,        // Fragment only
    EOpDPdyFine,        // Fragment only
    EOpFwidthFine,      // Fragment only
    EOpDPdxCoarse,      // Fragment only
    EOpDPdyCoarse,      // Fragment only
    EOpFwidthCoarse,    // Fragment only

    EOpInterpolateAtCentroid, // Fragment only
    EOpInterpolateAtSample,   // Fragment only
    EOpInterpolateAtOffset,   // Fragment only
    EOpInterpolateAtVertex,

    EOpMatrixTimesMatrix,
    EOpOuterProduct,
    EOpDeterminant,
    EOpMatrixInverse,
    EOpTranspose,

    EOpFtransform,

    EOpNoise,

    EOpEmitVertex,           // geometry only
    EOpEndPrimitive,         // geometry only
    EOpEmitStreamVertex,     // geometry only
    EOpEndStreamPrimitive,   // geometry only

    EOpBarrier,
    EOpMemoryBarrier,
    EOpMemoryBarrierAtomicCounter,
    EOpMemoryBarrierBuffer,
    EOpMemoryBarrierImage,
    EOpMemoryBarrierShared,  // compute only
    EOpGroupMemoryBarrier,   // compute only

    EOpBallot,
    EOpReadInvocation,
    EOpReadFirstInvocation,

    EOpAnyInvocation,
    EOpAllInvocations,
    EOpAllInvocationsEqual,

    EOpSubgroupGuardStart,
    EOpSubgroupBarrier,
    EOpSubgroupMemoryBarrier,
    EOpSubgroupMemoryBarrierBuffer,
    EOpSubgroupMemoryBarrierImage,
    EOpSubgroupMemoryBarrierShared, // compute only
    EOpSubgroupElect,
    EOpSubgroupAll,
    EOpSubgroupAny,
    EOpSubgroupAllEqual,
    EOpSubgroupBroadcast,
    EOpSubgroupBroadcastFirst,
    EOpSubgroupBallot,
    EOpSubgroupInverseBallot,
    EOpSubgroupBallotBitExtract,
    EOpSubgroupBallotBitCount,
    EOpSubgroupBallotInclusiveBitCount,
    EOpSubgroupBallotExclusiveBitCount,
    EOpSubgroupBallotFindLSB,
    EOpSubgroupBallotFindMSB,
    EOpSubgroupShuffle,
    EOpSubgroupShuffleXor,
    EOpSubgroupShuffleUp,
    EOpSubgroupShuffleDown,
    EOpSubgroupAdd,
    EOpSubgroupMul,
    EOpSubgroupMin,
    EOpSubgroupMax,
    EOpSubgroupAnd,
    EOpSubgroupOr,
    EOpSubgroupXor,
    EOpSubgroupInclusiveAdd,
    EOpSubgroupInclusiveMul,
    EOpSubgroupInclusiveMin,
    EOpSubgroupInclusiveMax,
    EOpSubgroupInclusiveAnd,
    EOpSubgroupInclusiveOr,
    EOpSubgroupInclusiveXor,
    EOpSubgroupExclusiveAdd,
    EOpSubgroupExclusiveMul,
    EOpSubgroupExclusiveMin,
    EOpSubgroupExclusiveMax,
    EOpSubgroupExclusiveAnd,
    EOpSubgroupExclusiveOr,
    EOpSubgroupExclusiveXor,
    EOpSubgroupClusteredAdd,
    EOpSubgroupClusteredMul,
    EOpSubgroupClusteredMin,
    EOpSubgroupClusteredMax,
    EOpSubgroupClusteredAnd,
    EOpSubgroupClusteredOr,
    EOpSubgroupClusteredXor,
    EOpSubgroupQuadBroadcast,
    EOpSubgroupQuadSwapHorizontal,
    EOpSubgroupQuadSwapVertical,
    EOpSubgroupQuadSwapDiagonal,

    EOpSubgroupPartition,
    EOpSubgroupPartitionedAdd,
    EOpSubgroupPartitionedMul,
    EOpSubgroupPartitionedMin,
    EOpSubgroupPartitionedMax,
    EOpSubgroupPartitionedAnd,
    EOpSubgroupPartitionedOr,
    EOpSubgroupPartitionedXor,
    EOpSubgroupPartitionedInclusiveAdd,
    EOpSubgroupPartitionedInclusiveMul,
    EOpSubgroupPartitionedInclusiveMin,
    EOpSubgroupPartitionedInclusiveMax,
    EOpSubgroupPartitionedInclusiveAnd,
    EOpSubgroupPartitionedInclusiveOr,
    EOpSubgroupPartitionedInclusiveXor,
    EOpSubgroupPartitionedExclusiveAdd,
    EOpSubgroupPartitionedExclusiveMul,
    EOpSubgroupPartitionedExclusiveMin,
    EOpSubgroupPartitionedExclusiveMax,
    EOpSubgroupPartitionedExclusiveAnd,
    EOpSubgroupPartitionedExclusiveOr,
    EOpSubgroupPartitionedExclusiveXor,

    EOpSubgroupGuardStop,

    EOpMinInvocations,
    EOpMaxInvocations,
    EOpAddInvocations,
    EOpMinInvocationsNonUniform,
    EOpMaxInvocationsNonUniform,
    EOpAddInvocationsNonUniform,
    EOpMinInvocationsInclusiveScan,
    EOpMaxInvocationsInclusiveScan,
    EOpAddInvocationsInclusiveScan,
    EOpMinInvocationsInclusiveScanNonUniform,
    EOpMaxInvocationsInclusiveScanNonUniform,
    EOpAddInvocationsInclusiveScanNonUniform,
    EOpMinInvocationsExclusiveScan,
    EOpMaxInvocationsExclusiveScan,
    EOpAddInvocationsExclusiveScan,
    EOpMinInvocationsExclusiveScanNonUniform,
    EOpMaxInvocationsExclusiveScanNonUniform,
    EOpAddInvocationsExclusiveScanNonUniform,
    EOpSwizzleInvocations,
    EOpSwizzleInvocationsMasked,
    EOpWriteInvocation,
    EOpMbcnt,

    EOpCubeFaceIndex,
    EOpCubeFaceCoord,
    EOpTime,

    EOpAtomicAdd,
    EOpAtomicMin,
    EOpAtomicMax,
    EOpAtomicAnd,
    EOpAtomicOr,
    EOpAtomicXor,
    EOpAtomicExchange,
    EOpAtomicCompSwap,
    EOpAtomicLoad,
    EOpAtomicStore,

    EOpAtomicCounterIncrement, // results in pre-increment value
    EOpAtomicCounterDecrement, // results in post-decrement value
    EOpAtomicCounter,
    EOpAtomicCounterAdd,
    EOpAtomicCounterSubtract,
    EOpAtomicCounterMin,
    EOpAtomicCounterMax,
    EOpAtomicCounterAnd,
    EOpAtomicCounterOr,
    EOpAtomicCounterXor,
    EOpAtomicCounterExchange,
    EOpAtomicCounterCompSwap,

    EOpAny,
    EOpAll,

    EOpCooperativeMatrixLoad,
    EOpCooperativeMatrixStore,
    EOpCooperativeMatrixMulAdd,

    EOpBeginInvocationInterlock, // Fragment only
    EOpEndInvocationInterlock, // Fragment only

    EOpIsHelperInvocation,

    EOpDebugPrintf,

    //
    // Branch
    //

    EOpKill,                // Fragment only
    EOpTerminateInvocation, // Fragment only
    EOpDemote,              // Fragment only
    EOpTerminateRayKHR,         // Any-hit only
    EOpIgnoreIntersectionKHR,   // Any-hit only
    EOpReturn,
    EOpBreak,
    EOpContinue,
    EOpCase,
    EOpDefault,

    //
    // Constructors
    //

    EOpConstructGuardStart,
    EOpConstructInt,          // these first scalar forms also identify what implicit conversion is needed
    EOpConstructUint,
    EOpConstructInt8,
    EOpConstructUint8,
    EOpConstructInt16,
    EOpConstructUint16,
    EOpConstructInt64,
    EOpConstructUint64,
    EOpConstructBool,
    EOpConstructFloat,
    EOpConstructDouble,
    // Keep vector and matrix constructors in a consistent relative order for
    // TParseContext::constructBuiltIn, which converts between 8/16/32 bit
    // vector constructors
    EOpConstructVec2,
    EOpConstructVec3,
    EOpConstructVec4,
    EOpConstructMat2x2,
    EOpConstructMat2x3,
    EOpConstructMat2x4,
    EOpConstructMat3x2,
    EOpConstructMat3x3,
    EOpConstructMat3x4,
    EOpConstructMat4x2,
    EOpConstructMat4x3,
    EOpConstructMat4x4,
    EOpConstructDVec2,
    EOpConstructDVec3,
    EOpConstructDVec4,
    EOpConstructBVec2,
    EOpConstructBVec3,
    EOpConstructBVec4,
    EOpConstructI8Vec2,
    EOpConstructI8Vec3,
    EOpConstructI8Vec4,
    EOpConstructU8Vec2,
    EOpConstructU8Vec3,
    EOpConstructU8Vec4,
    EOpConstructI16Vec2,
    EOpConstructI16Vec3,
    EOpConstructI16Vec4,
    EOpConstructU16Vec2,
    EOpConstructU16Vec3,
    EOpConstructU16Vec4,
    EOpConstructIVec2,
    EOpConstructIVec3,
    EOpConstructIVec4,
    EOpConstructUVec2,
    EOpConstructUVec3,
    EOpConstructUVec4,
    EOpConstructI64Vec2,
    EOpConstructI64Vec3,
    EOpConstructI64Vec4,
    EOpConstructU64Vec2,
    EOpConstructU64Vec3,
    EOpConstructU64Vec4,
    EOpConstructDMat2x2,
    EOpConstructDMat2x3,
    EOpConstructDMat2x4,
    EOpConstructDMat3x2,
    EOpConstructDMat3x3,
    EOpConstructDMat3x4,
    EOpConstructDMat4x2,
    EOpConstructDMat4x3,
    EOpConstructDMat4x4,
    EOpConstructIMat2x2,
    EOpConstructIMat2x3,
    EOpConstructIMat2x4,
    EOpConstructIMat3x2,
    EOpConstructIMat3x3,
    EOpConstructIMat3x4,
    EOpConstructIMat4x2,
    EOpConstructIMat4x3,
    EOpConstructIMat4x4,
    EOpConstructUMat2x2,
    EOpConstructUMat2x3,
    EOpConstructUMat2x4,
    EOpConstructUMat3x2,
    EOpConstructUMat3x3,
    EOpConstructUMat3x4,
    EOpConstructUMat4x2,
    EOpConstructUMat4x3,
    EOpConstructUMat4x4,
    EOpConstructBMat2x2,
    EOpConstructBMat2x3,
    EOpConstructBMat2x4,
    EOpConstructBMat3x2,
    EOpConstructBMat3x3,
    EOpConstructBMat3x4,
    EOpConstructBMat4x2,
    EOpConstructBMat4x3,
    EOpConstructBMat4x4,
    EOpConstructFloat16,
    EOpConstructF16Vec2,
    EOpConstructF16Vec3,
    EOpConstructF16Vec4,
    EOpConstructF16Mat2x2,
    EOpConstructF16Mat2x3,
    EOpConstructF16Mat2x4,
    EOpConstructF16Mat3x2,
    EOpConstructF16Mat3x3,
    EOpConstructF16Mat3x4,
    EOpConstructF16Mat4x2,
    EOpConstructF16Mat4x3,
    EOpConstructF16Mat4x4,
    EOpConstructStruct,
    EOpConstructTextureSampler,
    EOpConstructNonuniform,     // expected to be transformed away, not present in final AST
    EOpConstructReference,
    EOpConstructCooperativeMatrix,
    EOpConstructAccStruct,
    EOpConstructGuardEnd,

    //
    // moves
    //

    EOpAssign,
    EOpAddAssign,
    EOpSubAssign,
    EOpMulAssign,
    EOpVectorTimesMatrixAssign,
    EOpVectorTimesScalarAssign,
    EOpMatrixTimesScalarAssign,
    EOpMatrixTimesMatrixAssign,
    EOpDivAssign,
    EOpModAssign,
    EOpAndAssign,
    EOpInclusiveOrAssign,
    EOpExclusiveOrAssign,
    EOpLeftShiftAssign,
    EOpRightShiftAssign,

    //
    // Array operators
    //

    // Can apply to arrays, vectors, or matrices.
    // Can be decomposed to a constant at compile time, but this does not always happen,
    // due to link-time effects. So, consumer can expect either a link-time sized or
    // run-time sized array.
    EOpArrayLength,

    //
    // Image operations
    //

    EOpImageGuardBegin,

    EOpImageQuerySize,
    EOpImageQuerySamples,
    EOpImageLoad,
    EOpImageStore,
    EOpImageLoadLod,
    EOpImageStoreLod,
    EOpImageAtomicAdd,
    EOpImageAtomicMin,
    EOpImageAtomicMax,
    EOpImageAtomicAnd,
    EOpImageAtomicOr,
    EOpImageAtomicXor,
    EOpImageAtomicExchange,
    EOpImageAtomicCompSwap,
    EOpImageAtomicLoad,
    EOpImageAtomicStore,

    EOpSubpassLoad,
    EOpSubpassLoadMS,
    EOpSparseImageLoad,
    EOpSparseImageLoadLod,

    EOpImageGuardEnd,

    //
    // Texture operations
    //

    EOpTextureGuardBegin,

    EOpTextureQuerySize,
    EOpTextureQueryLod,
    EOpTextureQueryLevels,
    EOpTextureQuerySamples,

    EOpSamplingGuardBegin,

    EOpTexture,
    EOpTextureProj,
    EOpTextureLod,
    EOpTextureOffset,
    EOpTextureFetch,
    EOpTextureFetchOffset,
    EOpTextureProjOffset,
    EOpTextureLodOffset,
    EOpTextureProjLod,
    EOpTextureProjLodOffset,
    EOpTextureGrad,
    EOpTextureGradOffset,
    EOpTextureProjGrad,
    EOpTextureProjGradOffset,
    EOpTextureGather,
    EOpTextureGatherOffset,
    EOpTextureGatherOffsets,
    EOpTextureClamp,
    EOpTextureOffsetClamp,
    EOpTextureGradClamp,
    EOpTextureGradOffsetClamp,
    EOpTextureGatherLod,
    EOpTextureGatherLodOffset,
    EOpTextureGatherLodOffsets,
    EOpFragmentMaskFetch,
    EOpFragmentFetch,

    EOpSparseTextureGuardBegin,

    EOpSparseTexture,
    EOpSparseTextureLod,
    EOpSparseTextureOffset,
    EOpSparseTextureFetch,
    EOpSparseTextureFetchOffset,
    EOpSparseTextureLodOffset,
    EOpSparseTextureGrad,
    EOpSparseTextureGradOffset,
    EOpSparseTextureGather,
    EOpSparseTextureGatherOffset,
    EOpSparseTextureGatherOffsets,
    EOpSparseTexelsResident,
    EOpSparseTextureClamp,
    EOpSparseTextureOffsetClamp,
    EOpSparseTextureGradClamp,
    EOpSparseTextureGradOffsetClamp,
    EOpSparseTextureGatherLod,
    EOpSparseTextureGatherLodOffset,
    EOpSparseTextureGatherLodOffsets,

    EOpSparseTextureGuardEnd,

    EOpImageFootprintGuardBegin,
    EOpImageSampleFootprintNV,
    EOpImageSampleFootprintClampNV,
    EOpImageSampleFootprintLodNV,
    EOpImageSampleFootprintGradNV,
    EOpImageSampleFootprintGradClampNV,
    EOpImageFootprintGuardEnd,
    EOpSamplingGuardEnd,
    EOpTextureGuardEnd,

    //
    // Integer operations
    //

    EOpAddCarry,
    EOpSubBorrow,
    EOpUMulExtended,
    EOpIMulExtended,
    EOpBitfieldExtract,
    EOpBitfieldInsert,
    EOpBitFieldReverse,
    EOpBitCount,
    EOpFindLSB,
    EOpFindMSB,

    EOpCountLeadingZeros,
    EOpCountTrailingZeros,
    EOpAbsDifference,
    EOpAddSaturate,
    EOpSubSaturate,
    EOpAverage,
    EOpAverageRounded,
    EOpMul32x16,

    EOpTraceNV,
    EOpTraceKHR,
    EOpReportIntersection,
    EOpIgnoreIntersectionNV,
    EOpTerminateRayNV,
    EOpExecuteCallableNV,
    EOpExecuteCallableKHR,
    EOpWritePackedPrimitiveIndices4x8NV,

    //
    // GL_EXT_ray_query operations
    //

    EOpRayQueryInitialize,
    EOpRayQueryTerminate,
    EOpRayQueryGenerateIntersection,
    EOpRayQueryConfirmIntersection,
    EOpRayQueryProceed,
    EOpRayQueryGetIntersectionType,
    EOpRayQueryGetRayTMin,
    EOpRayQueryGetRayFlags,
    EOpRayQueryGetIntersectionT,
    EOpRayQueryGetIntersectionInstanceCustomIndex,
    EOpRayQueryGetIntersectionInstanceId,
    EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset,
    EOpRayQueryGetIntersectionGeometryIndex,
    EOpRayQueryGetIntersectionPrimitiveIndex,
    EOpRayQueryGetIntersectionBarycentrics,
    EOpRayQueryGetIntersectionFrontFace,
    EOpRayQueryGetIntersectionCandidateAABBOpaque,
    EOpRayQueryGetIntersectionObjectRayDirection,
    EOpRayQueryGetIntersectionObjectRayOrigin,
    EOpRayQueryGetWorldRayDirection,
    EOpRayQueryGetWorldRayOrigin,
    EOpRayQueryGetIntersectionObjectToWorld,
    EOpRayQueryGetIntersectionWorldToObject,

    //
    // HLSL operations
    //

    EOpClip,                // discard if input value < 0
    EOpIsFinite,
    EOpLog10,               // base 10 log
    EOpRcp,                 // 1/x
    EOpSaturate,            // clamp from 0 to 1
    EOpSinCos,              // sin and cos in out parameters
    EOpGenMul,              // mul(x,y) on any of mat/vec/scalars
    EOpDst,                 // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
    EOpInterlockedAdd,      // atomic ops, but uses [optional] out arg instead of return
    EOpInterlockedAnd,      // ...
    EOpInterlockedCompareExchange, // ...
    EOpInterlockedCompareStore,    // ...
    EOpInterlockedExchange, // ...
    EOpInterlockedMax,      // ...
    EOpInterlockedMin,      // ...
    EOpInterlockedOr,       // ...
    EOpInterlockedXor,      // ...
    EOpAllMemoryBarrierWithGroupSync,    // memory barriers without non-hlsl AST equivalents
    EOpDeviceMemoryBarrier,              // ...
    EOpDeviceMemoryBarrierWithGroupSync, // ...
    EOpWorkgroupMemoryBarrier,           // ...
    EOpWorkgroupMemoryBarrierWithGroupSync, // ...
    EOpEvaluateAttributeSnapped,         // InterpolateAtOffset with int position on 16x16 grid
    EOpF32tof16,                         // HLSL conversion: half of a PackHalf2x16
    EOpF16tof32,                         // HLSL conversion: half of an UnpackHalf2x16
    EOpLit,                              // HLSL lighting coefficient vector
    EOpTextureBias,                      // HLSL texture bias: will be lowered to EOpTexture
    EOpAsDouble,                         // slightly different from EOpUint64BitsToDouble
    EOpD3DCOLORtoUBYTE4,                 // convert and swizzle 4-component color to UBYTE4 range

    EOpMethodSample,                     // Texture object methods.  These are translated to existing
    EOpMethodSampleBias,                 // AST methods, and exist to represent HLSL semantics until that
    EOpMethodSampleCmp,                  // translation is performed.  See HlslParseContext::decomposeSampleMethods().
    EOpMethodSampleCmpLevelZero,         // ...
    EOpMethodSampleGrad,                 // ...
    EOpMethodSampleLevel,                // ...
    EOpMethodLoad,                       // ...
    EOpMethodGetDimensions,              // ...
    EOpMethodGetSamplePosition,          // ...
    EOpMethodGather,                     // ...
    EOpMethodCalculateLevelOfDetail,     // ...
    EOpMethodCalculateLevelOfDetailUnclamped,     // ...

    // Load already defined above for textures
    EOpMethodLoad2,                      // Structure buffer object methods.  These are translated to existing
    EOpMethodLoad3,                      // AST methods, and exist to represent HLSL semantics until that
    EOpMethodLoad4,                      // translation is performed.  See HlslParseContext::decomposeSampleMethods().
    EOpMethodStore,                      // ...
    EOpMethodStore2,                     // ...
    EOpMethodStore3,                     // ...
    EOpMethodStore4,                     // ...
    EOpMethodIncrementCounter,           // ...
    EOpMethodDecrementCounter,           // ...
    // EOpMethodAppend is defined for geo shaders below
    EOpMethodConsume,

    // SM5 texture methods
    EOpMethodGatherRed,                  // These are covered under the above EOpMethodSample comment about
    EOpMethodGatherGreen,                // translation to existing AST opcodes.  They exist temporarily
    EOpMethodGatherBlue,                 // because HLSL arguments are slightly different.
    EOpMethodGatherAlpha,                // ...
    EOpMethodGatherCmp,                  // ...
    EOpMethodGatherCmpRed,               // ...
    EOpMethodGatherCmpGreen,             // ...
    EOpMethodGatherCmpBlue,              // ...
    EOpMethodGatherCmpAlpha,             // ...

    // geometry methods
    EOpMethodAppend,                     // Geometry shader methods
    EOpMethodRestartStrip,               // ...

    // matrix
    EOpMatrixSwizzle,                    // select multiple matrix components (non-column)

    // SM6 wave ops
    EOpWaveGetLaneCount,                 // Will decompose to gl_SubgroupSize.
    EOpWaveGetLaneIndex,                 // Will decompose to gl_SubgroupInvocationID.
    EOpWaveActiveCountBits,              // Will decompose to subgroupBallotBitCount(subgroupBallot()).
    EOpWavePrefixCountBits,              // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).

    // Shader Clock Ops
    EOpReadClockSubgroupKHR,
    EOpReadClockDeviceKHR,
};

class TIntermTraverser;
class TIntermOperator;
class TIntermAggregate;
class TIntermUnary;
class TIntermBinary;
class TIntermConstantUnion;
class TIntermSelection;
class TIntermSwitch;
class TIntermBranch;
class TIntermTyped;
class TIntermMethod;
class TIntermSymbol;
class TIntermLoop;

} // end namespace glslang

//
// Base class for the tree nodes
//
// (Put outside the glslang namespace, as it's used as part of the external interface.)
//
class TIntermNode {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())

    TIntermNode() { loc.init(); }
    virtual const glslang::TSourceLoc& getLoc() const { return loc; }
    virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
    virtual void traverse(glslang::TIntermTraverser*) = 0;
    virtual       glslang::TIntermTyped*         getAsTyped()               { return 0; }
    virtual       glslang::TIntermOperator*      getAsOperator()            { return 0; }
    virtual       glslang::TIntermConstantUnion* getAsConstantUnion()       { return 0; }
    virtual       glslang::TIntermAggregate*     getAsAggregate()           { return 0; }
    virtual       glslang::TIntermUnary*         getAsUnaryNode()           { return 0; }
    virtual       glslang::TIntermBinary*        getAsBinaryNode()          { return 0; }
    virtual       glslang::TIntermSelection*     getAsSelectionNode()       { return 0; }
    virtual       glslang::TIntermSwitch*        getAsSwitchNode()          { return 0; }
    virtual       glslang::TIntermMethod*        getAsMethodNode()          { return 0; }
    virtual       glslang::TIntermSymbol*        getAsSymbolNode()          { return 0; }
    virtual       glslang::TIntermBranch*        getAsBranchNode()          { return 0; }
    virtual       glslang::TIntermLoop*          getAsLoopNode()            { return 0; }

    virtual const glslang::TIntermTyped*         getAsTyped()         const { return 0; }
    virtual const glslang::TIntermOperator*      getAsOperator()      const { return 0; }
    virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
    virtual const glslang::TIntermAggregate*     getAsAggregate()     const { return 0; }
    virtual const glslang::TIntermUnary*         getAsUnaryNode()     const { return 0; }
    virtual const glslang::TIntermBinary*        getAsBinaryNode()    const { return 0; }
    virtual const glslang::TIntermSelection*     getAsSelectionNode() const { return 0; }
    virtual const glslang::TIntermSwitch*        getAsSwitchNode()    const { return 0; }
    virtual const glslang::TIntermMethod*        getAsMethodNode()    const { return 0; }
    virtual const glslang::TIntermSymbol*        getAsSymbolNode()    const { return 0; }
    virtual const glslang::TIntermBranch*        getAsBranchNode()    const { return 0; }
    virtual const glslang::TIntermLoop*          getAsLoopNode()      const { return 0; }
    virtual ~TIntermNode() { }

protected:
    TIntermNode(const TIntermNode&);
    TIntermNode& operator=(const TIntermNode&);
    glslang::TSourceLoc loc;
};
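
// Usage sketch (illustrative only, not part of the interface): the getAs*()
// methods above provide a lightweight, dynamic_cast-free way to test for a
// concrete node kind, for example:
//
//     bool isAssignment(const TIntermNode* node)
//     {
//         const glslang::TIntermBinary* bin = node->getAsBinaryNode();
//         return bin != nullptr && bin->getOp() == glslang::EOpAssign;
//     }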

namespace glslang {

//
// This is just to help yacc.
//
struct TIntermNodePair {
    TIntermNode* node1;
    TIntermNode* node2;
};

//
// Intermediate class for nodes that have a type.
//
class TIntermTyped : public TIntermNode {
public:
    TIntermTyped(const TType& t) { type.shallowCopy(t); }
    TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
    virtual       TIntermTyped* getAsTyped()       { return this; }
    virtual const TIntermTyped* getAsTyped() const { return this; }
    virtual void setType(const TType& t) { type.shallowCopy(t); }
    virtual const TType& getType() const { return type; }
    virtual TType& getWritableType() { return type; }

    virtual TBasicType getBasicType() const { return type.getBasicType(); }
    virtual TQualifier& getQualifier() { return type.getQualifier(); }
    virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
    virtual void propagatePrecision(TPrecisionQualifier);
    virtual int getVectorSize() const { return type.getVectorSize(); }
    virtual int getMatrixCols() const { return type.getMatrixCols(); }
    virtual int getMatrixRows() const { return type.getMatrixRows(); }
    virtual bool isMatrix() const { return type.isMatrix(); }
    virtual bool isArray()  const { return type.isArray(); }
    virtual bool isVector() const { return type.isVector(); }
    virtual bool isScalar() const { return type.isScalar(); }
    virtual bool isStruct() const { return type.isStruct(); }
    virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
    virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
    bool isAtomic() const { return type.isAtomic(); }
    bool isReference() const { return type.isReference(); }
    TString getCompleteString() const { return type.getCompleteString(); }

protected:
    TIntermTyped& operator=(const TIntermTyped&);
    TType type;
};
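
// Usage sketch (illustrative only): because typed nodes carry their TType
// directly, shape queries need no symbol-table lookup, for example:
//
//     bool isFloatVec4(const TIntermTyped* node)
//     {
//         return node->getBasicType() == EbtFloat &&
//                node->isVector() && node->getVectorSize() == 4;
//     }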

//
// Handle for, do-while, and while loops.
//
class TIntermLoop : public TIntermNode {
public:
    TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
        body(aBody),
        test(aTest),
        terminal(aTerminal),
        first(testFirst),
        unroll(false),
        dontUnroll(false),
        dependency(0),
        minIterations(0),
        maxIterations(iterationsInfinite),
        iterationMultiple(1),
        peelCount(0),
        partialCount(0)
    { }

    virtual       TIntermLoop* getAsLoopNode() { return this; }
    virtual const TIntermLoop* getAsLoopNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TIntermNode*  getBody() const { return body; }
    TIntermTyped* getTest() const { return test; }
    TIntermTyped* getTerminal() const { return terminal; }
    bool testFirst() const { return first; }

    void setUnroll()     { unroll = true; }
    void setDontUnroll() {
        dontUnroll = true;
        peelCount = 0;
        partialCount = 0;
    }
    bool getUnroll()     const { return unroll; }
    bool getDontUnroll() const { return dontUnroll; }

    static const unsigned int dependencyInfinite = 0xFFFFFFFF;
    static const unsigned int iterationsInfinite = 0xFFFFFFFF;
    void setLoopDependency(int d) { dependency = d; }
    int getLoopDependency() const { return dependency; }

    void setMinIterations(unsigned int v) { minIterations = v; }
    unsigned int getMinIterations() const { return minIterations; }
    void setMaxIterations(unsigned int v) { maxIterations = v; }
    unsigned int getMaxIterations() const { return maxIterations; }
    void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
    unsigned int getIterationMultiple() const { return iterationMultiple; }
    void setPeelCount(unsigned int v) {
        peelCount = v;
        dontUnroll = false;
    }
    unsigned int getPeelCount() const { return peelCount; }
    void setPartialCount(unsigned int v) {
        partialCount = v;
        dontUnroll = false;
    }
    unsigned int getPartialCount() const { return partialCount; }

protected:
    TIntermNode* body;       // code to loop over
    TIntermTyped* test;      // exit condition associated with loop, could be 0 for 'for' loops
    TIntermTyped* terminal;  // exists for for-loops
    bool first;              // true for while and for, not for do-while
    bool unroll;             // true if unroll requested
    bool dontUnroll;         // true if request to not unroll
    unsigned int dependency; // loop dependency hint; 0 means not set or unknown
    unsigned int minIterations;      // as per the SPIR-V specification
    unsigned int maxIterations;      // as per the SPIR-V specification
    unsigned int iterationMultiple;  // as per the SPIR-V specification
    unsigned int peelCount;          // as per the SPIR-V specification
    unsigned int partialCount;       // as per the SPIR-V specification
};
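
// Usage sketch (illustrative only): setDontUnroll() clears any peel/partial
// counts, and setPeelCount()/setPartialCount() clear dontUnroll, so the two
// kinds of hints are mutually exclusive by construction, for example:
//
//     void requestPartialUnroll(TIntermLoop& loop)
//     {
//         loop.setUnroll();        // ask for unrolling
//         loop.setPartialCount(4); // also clears a prior dontUnroll request
//     }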

//
// Handle case, break, continue, return, and kill.
//
class TIntermBranch : public TIntermNode {
public:
    TIntermBranch(TOperator op, TIntermTyped* e) :
        flowOp(op),
        expression(e) { }
    virtual       TIntermBranch* getAsBranchNode()       { return this; }
    virtual const TIntermBranch* getAsBranchNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TOperator getFlowOp() const { return flowOp; }
    TIntermTyped* getExpression() const { return expression; }
    void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
    void updatePrecision(TPrecisionQualifier parentPrecision);
protected:
    TOperator flowOp;
    TIntermTyped* expression;
};
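
// Construction sketch (illustrative only; assumes a thread pool allocator is
// active, since node memory comes from POOL_ALLOCATOR_NEW_DELETE above):
//
//     TIntermBranch* makeReturn(TIntermTyped* value)
//     {
//         return new TIntermBranch(EOpReturn, value); // value may be nullptr
//     }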

//
// Represent method names before seeing their calling signature
// or resolving them to operations.  Just an expression as the base object
// and a textual name.
//
class TIntermMethod : public TIntermTyped {
public:
    TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
    virtual       TIntermMethod* getAsMethodNode()       { return this; }
    virtual const TIntermMethod* getAsMethodNode() const { return this; }
    virtual const TString& getMethodName() const { return method; }
    virtual TIntermTyped* getObject() const { return object; }
    virtual void traverse(TIntermTraverser*);
protected:
    TIntermTyped* object;
    TString method;
};

//
// Nodes that correspond to symbols or constants in the source code.
//
class TIntermSymbol : public TIntermTyped {
public:
    // If symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
    // the per-process threadPoolAllocator, then it causes increased memory usage per compile;
    // it is essential to use "symbol = sym" to assign to symbol.
    TIntermSymbol(int i, const TString& n, const TType& t)
        : TIntermTyped(t), id(i),
#ifndef GLSLANG_WEB
        flattenSubset(-1),
#endif
        constSubtree(nullptr)
          { name = n; }
    virtual int getId() const { return id; }
    virtual void changeId(int i) { id = i; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual       TIntermSymbol* getAsSymbolNode()       { return this; }
    virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
    void setConstArray(const TConstUnionArray& c) { constArray = c; }
    const TConstUnionArray& getConstArray() const { return constArray; }
    void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
    TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifndef GLSLANG_WEB
    void setFlattenSubset(int subset) { flattenSubset = subset; }
    virtual const TString& getAccessName() const;

    int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif

    // This is meant for cases where a node has already been constructed, and
    // later on, it becomes necessary to switch to a different symbol.
    virtual void switchId(int newId) { id = newId; }

protected:
    int id;                      // the unique id of the symbol this node represents
#ifndef GLSLANG_WEB
    int flattenSubset;           // how deeply the flattened object rooted at id has been dereferenced
#endif
    TString name;                // the name of the symbol this node represents
    TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
    TIntermTyped* constSubtree;
};

class TIntermConstantUnion : public TIntermTyped {
public:
    TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
    const TConstUnionArray& getConstArray() const { return constArray; }
    virtual       TIntermConstantUnion* getAsConstantUnion()       { return this; }
    virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
    virtual TIntermTyped* fold(TOperator, const TType&) const;
    void setLiteral() { literal = true; }
    void setExpression() { literal = false; }
    bool isLiteral() const { return literal; }

protected:
    TIntermConstantUnion& operator=(const TIntermConstantUnion&);

    const TConstUnionArray constArray;
    bool literal;  // true if node represents a literal in the source code
};
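
// Usage sketch (illustrative only; assumes the fold() overload taking a
// TIntermTyped* folds a binary operation against the right-hand constant
// operand, while the TType& overload handles unary/conversion folding):
//
//     TIntermTyped* foldAdd(const TIntermConstantUnion* left,
//                           const TIntermConstantUnion* right)
//     {
//         return left->fold(EOpAdd, right); // nullptr is assumed to mean "not folded"
//     }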

// Represent the independent aspects of a texturing TOperator
struct TCrackedTextureOp {
    bool query;
    bool proj;
    bool lod;
    bool fetch;
    bool offset;
    bool offsets;
    bool gather;
    bool grad;
    bool subpass;
    bool lodClamp;
    bool fragMask;
};

//
// Intermediate class for node types that hold operators.
//
class TIntermOperator : public TIntermTyped {
public:
    virtual       TIntermOperator* getAsOperator()       { return this; }
    virtual const TIntermOperator* getAsOperator() const { return this; }
    TOperator getOp() const { return op; }
    void setOp(TOperator newOp) { op = newOp; }
    bool modifiesState() const;
    bool isConstructor() const;
    bool isTexture()  const { return op > EOpTextureGuardBegin  && op < EOpTextureGuardEnd; }
    bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
#ifdef GLSLANG_WEB
    bool isImage()          const { return false; }
    bool isSparseTexture()  const { return false; }
    bool isImageFootprint() const { return false; }
    bool isSparseImage()    const { return false; }
    bool isSubgroup()       const { return false; }
#else
    bool isImage()    const { return op > EOpImageGuardBegin    && op < EOpImageGuardEnd; }
    bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
    bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
    bool isSparseImage()   const { return op == EOpSparseImageLoad; }
    bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }
#endif

    void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
    TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
                                                                                     operationPrecision :
                                                                                     type.getQualifier().precision; }
    TString getCompleteString() const
    {
        TString cs = type.getCompleteString();
        if (getOperationPrecision() != type.getQualifier().precision) {
            cs += ", operation at ";
            cs += GetPrecisionQualifierString(getOperationPrecision());
        }

        return cs;
    }

    // Crack the op into the individual dimensions of texturing operation.
    void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
    {
        cracked.query = false;
        cracked.proj = false;
        cracked.lod = false;
        cracked.fetch = false;
        cracked.offset = false;
        cracked.offsets = false;
        cracked.gather = false;
        cracked.grad = false;
        cracked.subpass = false;
        cracked.lodClamp = false;
        cracked.fragMask = false;

        switch (op) {
        case EOpImageQuerySize:
        case EOpImageQuerySamples:
        case EOpTextureQuerySize:
        case EOpTextureQueryLod:
        case EOpTextureQueryLevels:
        case EOpTextureQuerySamples:
        case EOpSparseTexelsResident:
            cracked.query = true;
            break;
        case EOpTexture:
        case EOpSparseTexture:
            break;
        case EOpTextureProj:
            cracked.proj = true;
            break;
        case EOpTextureLod:
        case EOpSparseTextureLod:
            cracked.lod = true;
            break;
        case EOpTextureOffset:
        case EOpSparseTextureOffset:
            cracked.offset = true;
            break;
        case EOpTextureFetch:
        case EOpSparseTextureFetch:
            cracked.fetch = true;
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureFetchOffset:
        case EOpSparseTextureFetchOffset:
            cracked.fetch = true;
            cracked.offset = true;
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureProjOffset:
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureLodOffset:
        case EOpSparseTextureLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureProjLod:
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureProjLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureGrad:
        case EOpSparseTextureGrad:
            cracked.grad = true;
            break;
        case EOpTextureGradOffset:
        case EOpSparseTextureGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            break;
        case EOpTextureProjGrad:
            cracked.grad = true;
            cracked.proj = true;
            break;
        case EOpTextureProjGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            cracked.proj = true;
            break;
#ifndef GLSLANG_WEB
        case EOpTextureClamp:
        case EOpSparseTextureClamp:
            cracked.lodClamp = true;
            break;
        case EOpTextureOffsetClamp:
        case EOpSparseTextureOffsetClamp:
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradClamp:
        case EOpSparseTextureGradClamp:
            cracked.grad = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradOffsetClamp:
        case EOpSparseTextureGradOffsetClamp:
            cracked.grad = true;
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGather:
        case EOpSparseTextureGather:
            cracked.gather = true;
            break;
        case EOpTextureGatherOffset:
        case EOpSparseTextureGatherOffset:
            cracked.gather = true;
            cracked.offset = true;
            break;
        case EOpTextureGatherOffsets:
        case EOpSparseTextureGatherOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            break;
        case EOpTextureGatherLod:
        case EOpSparseTextureGatherLod:
            cracked.gather = true;
            cracked.lod    = true;
            break;
        case EOpTextureGatherLodOffset:
        case EOpSparseTextureGatherLodOffset:
            cracked.gather = true;
            cracked.offset = true;
            cracked.lod    = true;
            break;
        case EOpTextureGatherLodOffsets:
        case EOpSparseTextureGatherLodOffsets:
            cracked.gather  = true;
            cracked.offsets = true;
            cracked.lod     = true;
            break;
        case EOpImageLoadLod:
        case EOpImageStoreLod:
        case EOpSparseImageLoadLod:
            cracked.lod = true;
            break;
        case EOpFragmentMaskFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpFragmentFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpImageSampleFootprintNV:
            break;
        case EOpImageSampleFootprintClampNV:
            cracked.lodClamp = true;
            break;
        case EOpImageSampleFootprintLodNV:
            cracked.lod = true;
            break;
        case EOpImageSampleFootprintGradNV:
            cracked.grad = true;
            break;
        case EOpImageSampleFootprintGradClampNV:
            cracked.lodClamp = true;
            cracked.grad = true;
            break;
        case EOpSubpassLoad:
        case EOpSubpassLoadMS:
            cracked.subpass = true;
            break;
#endif
        default:
            break;
        }
    }

1571 protected:
TIntermOperator(TOperator o)1572     TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
TIntermOperator(TOperator o,TType & t)1573     TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
1574     TOperator op;
    // The result precision is in the inherited TType and is usually meant to be both
    // the operation precision and the result precision. However, some more complex things,
    // like built-in function calls, distinguish between the two, in which case a non-EpqNone
    // 'operationPrecision' overrides the result precision as far as operation precision
    // is concerned.
    TPrecisionQualifier operationPrecision;
};

//
// Nodes for all the basic binary math operators.
//
class TIntermBinary : public TIntermOperator {
public:
    TIntermBinary(TOperator o) : TIntermOperator(o) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setLeft(TIntermTyped* n) { left = n; }
    virtual void setRight(TIntermTyped* n) { right = n; }
    virtual TIntermTyped* getLeft() const { return left; }
    virtual TIntermTyped* getRight() const { return right; }
    virtual       TIntermBinary* getAsBinaryNode()       { return this; }
    virtual const TIntermBinary* getAsBinaryNode() const { return this; }
    virtual void updatePrecision();
protected:
    TIntermTyped* left;
    TIntermTyped* right;
};
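
// Example (illustrative sketch only, not how the parser normally builds these
// nodes; real construction goes through TIntermediate, which also handles type
// and precision propagation). 'lhs' and 'rhs' are assumed to be existing
// TIntermTyped* expression nodes, and a thread pool allocator is assumed to be
// active, since node 'new' comes from the pool allocator:
//
//     TIntermBinary* add = new TIntermBinary(EOpAdd);
//     add->setLeft(lhs);
//     add->setRight(rhs);
//     add->setType(lhs->getType());  // result type must be set explicitly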

//
// Nodes for unary math operators.
//
class TIntermUnary : public TIntermOperator {
public:
    TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(nullptr) {}
    TIntermUnary(TOperator o) : TIntermOperator(o), operand(nullptr) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setOperand(TIntermTyped* o) { operand = o; }
    virtual       TIntermTyped* getOperand() { return operand; }
    virtual const TIntermTyped* getOperand() const { return operand; }
    virtual       TIntermUnary* getAsUnaryNode()       { return this; }
    virtual const TIntermUnary* getAsUnaryNode() const { return this; }
    virtual void updatePrecision();
protected:
    TIntermTyped* operand;
};

typedef TVector<TIntermNode*> TIntermSequence;
typedef TVector<TStorageQualifier> TQualifierList;
//
// Nodes that operate on an arbitrarily sized set of children.
//
class TIntermAggregate : public TIntermOperator {
public:
    TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
    TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
    ~TIntermAggregate() { delete pragmaTable; }
    virtual       TIntermAggregate* getAsAggregate()       { return this; }
    virtual const TIntermAggregate* getAsAggregate() const { return this; }
    virtual void setOperator(TOperator o) { op = o; }
    virtual       TIntermSequence& getSequence()       { return sequence; }
    virtual const TIntermSequence& getSequence() const { return sequence; }
    virtual void setName(const TString& n) { name = n; }
    virtual const TString& getName() const { return name; }
    virtual void traverse(TIntermTraverser*);
    virtual void setUserDefined() { userDefined = true; }
    virtual bool isUserDefined() { return userDefined; }
    virtual TQualifierList& getQualifierList() { return qualifier; }
    virtual const TQualifierList& getQualifierList() const { return qualifier; }
    void setOptimize(bool o) { optimize = o; }
    void setDebug(bool d) { debug = d; }
    bool getOptimize() const { return optimize; }
    bool getDebug() const { return debug; }
    void setPragmaTable(const TPragmaTable& pTable);
    const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
protected:
    TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
    TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
    TIntermSequence sequence;
    TQualifierList qualifier;
    TString name;
    bool userDefined; // used for user defined function names
    bool optimize;
    bool debug;
    TPragmaTable* pragmaTable;
};
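
// Example (illustrative sketch only): an aggregate exposes its children through
// getSequence(). 'stmt1', 'stmt2', and 'someTraverser' are assumed to be
// previously built nodes and a user-defined traverser:
//
//     TIntermAggregate* block = new TIntermAggregate(EOpSequence);
//     block->getSequence().push_back(stmt1);
//     block->getSequence().push_back(stmt2);
//     for (TIntermNode* child : block->getSequence())
//         child->traverse(&someTraverser);   // visit each child in order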

//
// For if tests and ?: selections.
//
class TIntermSelection : public TIntermTyped {
public:
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* getCondition() const { return condition; }
    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
    virtual       TIntermSelection* getAsSelectionNode()       { return this; }
    virtual const TIntermSelection* getAsSelectionNode() const { return this; }

    void setNoShortCircuit() { shortCircuit = false; }
    bool getShortCircuit() const { return shortCircuit; }

    void setFlatten()     { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten()     const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermNode* trueBlock;
    TIntermNode* falseBlock;
    bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
    bool flatten;      // true if flatten requested
    bool dontFlatten;  // true if requested to not flatten
};
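
// Example (illustrative sketch only): building a typed ?: node. 'cond',
// 'trueExpr', 'falseExpr', 'resultType', and 'parsingHlsl' are assumed to come
// from the surrounding front end. Since HLSL's ?: evaluates both sides,
// short-circuiting would be turned off for that source language:
//
//     TIntermSelection* select = new TIntermSelection(cond, trueExpr, falseExpr, resultType);
//     if (parsingHlsl)
//         select->setNoShortCircuit();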

//
// For switch statements.  Designed use is that a switch will have a sequence of nodes
// that are either case/default nodes or a *single* node that represents all the code
// (if any) between consecutive case/defaults.  So, a traversal need only deal with
// 0 or 1 nodes per case/default statement.
//
class TIntermSwitch : public TIntermNode {
public:
    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermNode* getCondition() const { return condition; }
    virtual TIntermAggregate* getBody() const { return body; }
    virtual       TIntermSwitch* getAsSwitchNode()       { return this; }
    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }

    void setFlatten()     { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten()     const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermAggregate* body;
    bool flatten;     // true if flatten requested
    bool dontFlatten; // true if requested to not flatten
};
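
// Example (illustrative sketch only): walking a switch body laid out as described
// above. This assumes case/default labels appear as branch nodes (i.e.,
// getAsBranchNode() is non-null for them); anything else in the sequence is the
// single node holding all the code up to the next label:
//
//     const TIntermSequence& body = switchNode->getBody()->getSequence();
//     for (TIntermNode* n : body) {
//         if (n->getAsBranchNode()) {
//             // a case or default label
//         } else {
//             // the code between this label and the next one
//         }
//     }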

enum TVisit
{
    EvPreVisit,
    EvInVisit,
    EvPostVisit
};

//
// For traversing the tree.  The user should derive from this class,
// put any traversal-specific data in it, and then pass it to a Traverse method.
//
// When using this, just fill in the methods for the nodes you want visited.
// Return false from a pre-visit to skip visiting that node's subtree.
//
// Explicitly set postVisit to true if you want post-visiting; otherwise,
// filled-in methods will only be called at pre-visit time (before processing
// the subtree).  Similarly, set inVisit for in-order visiting of nodes with
// multiple children.
//
// If you only want post-visits, explicitly turn off preVisit (and inVisit)
// and turn on postVisit.
//
// In general, for the visit*() methods, return true from interior nodes
// to have the traversal continue on to children.
//
// If you process children yourself, or don't want them processed, return false.
//
class TIntermTraverser {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
            preVisit(preVisit),
            inVisit(inVisit),
            postVisit(postVisit),
            rightToLeft(rightToLeft),
            depth(0),
            maxDepth(0) { }
    virtual ~TIntermTraverser() { }

    virtual void visitSymbol(TIntermSymbol*)               { }
    virtual void visitConstantUnion(TIntermConstantUnion*) { }
    virtual bool visitBinary(TVisit, TIntermBinary*)       { return true; }
    virtual bool visitUnary(TVisit, TIntermUnary*)         { return true; }
    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
    virtual bool visitLoop(TVisit, TIntermLoop*)           { return true; }
    virtual bool visitBranch(TVisit, TIntermBranch*)       { return true; }
    virtual bool visitSwitch(TVisit, TIntermSwitch*)       { return true; }

    int getMaxDepth() const { return maxDepth; }

    void incrementDepth(TIntermNode *current)
    {
        depth++;
        maxDepth = (std::max)(maxDepth, depth);
        path.push_back(current);
    }

    void decrementDepth()
    {
        depth--;
        path.pop_back();
    }

    TIntermNode *getParentNode()
    {
        return path.size() == 0 ? nullptr : path.back();
    }

    const bool preVisit;
    const bool inVisit;
    const bool postVisit;
    const bool rightToLeft;

protected:
    TIntermTraverser& operator=(TIntermTraverser&);

    int depth;
    int maxDepth;
    // All the nodes from the root to the current node's parent during traversal.
    TVector<TIntermNode *> path;
};
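
// Example (illustrative sketch only): a derived traverser that counts binary
// nodes using only pre-visits; 'root' stands for any TIntermNode* in a built tree:
//
//     class TBinaryCounter : public TIntermTraverser {
//     public:
//         TBinaryCounter() : TIntermTraverser(true /* preVisit */), count(0) { }
//         virtual bool visitBinary(TVisit, TIntermBinary*) { ++count; return true; }
//         int count;
//     };
//
//     TBinaryCounter counter;
//     root->traverse(&counter);
//     // counter.count now holds the number of binary nodes reached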

// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
// sized with the same symbol, involving no operations"
inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
{
    return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
           node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
}
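
// For example (illustrative): given "layout(constant_id = 0) const int N = 3;",
// "float a[N];" and "float b[N];" have the same type, since both sizes are the
// symbol N with no operations, while "float c[N + 1];" matches neither, because
// its size involves an operation on the specialization constant.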

} // end namespace glslang

#endif // __INTERMEDIATE_H