/*
 * Copyright 2018-2021 Arm Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * At your option, you may choose to accept this material under either:
 *  1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
 *  2. The MIT License, found at <http://opensource.org/licenses/MIT>.
 * SPDX-License-Identifier: Apache-2.0 OR MIT.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
Parser::Parser(vector<uint32_t> spirv)
{
	ir.spirv = move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}

static bool decoration_is_string(Decoration decoration)
{
	switch (decoration)
	{
	case DecorationHlslSemanticGOOGLE:
		return true;

	default:
		return false;
	}
}

static inline uint32_t swap_endian(uint32_t v)
{
	return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}

static bool is_valid_spirv_version(uint32_t version)
{
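	// The version word is laid out as 0x00MMmm00 (major version in byte 2,
	// minor version in byte 1), so e.g. 0x10300 is SPIR-V 1.3.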
	switch (version)
	{
	// Allow v99 since it tends to just work.
	case 99:
	case 0x10000: // SPIR-V 1.0
	case 0x10100: // SPIR-V 1.1
	case 0x10200: // SPIR-V 1.2
	case 0x10300: // SPIR-V 1.3
	case 0x10400: // SPIR-V 1.4
	case 0x10500: // SPIR-V 1.5
		return true;

	default:
		return false;
	}
}

void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
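	// A SPIR-V module begins with a five-word header:
	// word 0: magic number, word 1: version, word 2: generator magic,
	// word 3: ID bound, word 4: reserved schema. Instructions start at word 5.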
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
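	// The magic number is 0x07230203; if the first word reads back byte-swapped,
	// the module was produced with the opposite endianness of the host.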
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

	uint32_t bound = s[3];

	const uint32_t MaximumNumberOfIDs = 0x3fffff;
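	// 0x3fffff (4,194,303) corresponds to the universal <id> bound limit in the SPIR-V specification.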
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

	ir.set_id_bounds(bound);

	uint32_t offset = 5;

	SmallVector<Instruction> instructions;
	while (offset < len)
	{
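		// The first word of every instruction packs the total word count
		// (including this word) in the upper 16 bits and the opcode in the lower 16 bits.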
		Instruction instr = {};
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();

	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
	// If we're not going to use any arguments, just return nullptr.
	// We want to avoid the case where we return an out-of-range pointer
	// that trips debug assertions on some platforms.
	if (!instr.length)
		return nullptr;

	if (instr.offset + instr.length > ir.spirv.size())
		SPIRV_CROSS_THROW("Compiler::stream() out of range.");
	return &ir.spirv[instr.offset];
}

static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
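	// Literal strings are nul-terminated UTF-8 packed four bytes per word,
	// little-endian within each word, so scan byte by byte until the terminator.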
	string ret;
	for (uint32_t i = offset; i < spirv.size(); i++)
	{
		uint32_t w = spirv[i];

		for (uint32_t j = 0; j < 4; j++, w >>= 8)
		{
			char c = w & 0xff;
			if (c == '\0')
				return ret;
			ret += c;
		}
	}

	SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	switch (op)
	{
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpModuleProcessed:
		break;

	case OpString:
	{
		set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemoryModel:
		ir.addressing_model = static_cast<AddressingModel>(ops[0]);
		ir.memory_model = static_cast<MemoryModel>(ops[1]);
		break;

	case OpSource:
	{
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(move(ext));
		break;
	}

	case OpExtInstImport:
	{
		uint32_t id = ops[0];
		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			set<SPIRExtension>(id, SPIRExtension::GLSL);
		else if (ext == "DebugInfo")
			set<SPIRExtension>(id, SPIRExtension::SPV_debug_info);
		else if (ext == "SPV_AMD_shader_ballot")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot);
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter);
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax);
		else if (ext == "SPV_AMD_gcn_shader")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader);
		else
			set<SPIRExtension>(id, SPIRExtension::Unsupported);

		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpExtInst:
	{
		// The SPIR-V debug information extended instructions might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpEntryPoint:
	{
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need nul-terminator and consume the whole word.
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
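		// E.g. "main" occupies 5 bytes including the terminator -> 2 words, so the
		// interface variable IDs follow the execution model, the entry point ID and
		// the name, starting at ops[strlen_words + 2].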

		for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
			e.interface_variables.push_back(ops[i]);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
		// so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2] != 0;
		auto &type = set<SPIRType>(id);
		type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
	// since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id);

		vecbase = base;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id);

		matrixbase = base;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		auto &arraybase = set<SPIRType>(id);

		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);

		arraybase = base;
		arraybase.parent_type = tid;

		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		bool literal = c && !c->specialization;

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, tid });

		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id);

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[1] });

		arraybase = base;
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
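		// The Depth operand is 0 (not depth), 1 (depth) or 2 (no indication),
		// so only an explicit 1 marks this as a depth image.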
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		// Very rarely, we might receive a FunctionPrototype here.
		// We won't be able to compile it, but we shouldn't crash when parsing.
		// We should be able to reflect.
		auto *base = maybe_get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id);

		if (base)
			ptrbase = *base;

		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		if (base && base->forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[2] });

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeForwardPointer:
	{
		uint32_t id = ops[0];
		auto &ptrbase = set<SPIRType>(id);
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);
		ptrbase.forward_pointer = true;

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == TypeID(0))
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	case OpTypeAccelerationStructureKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::AccelerationStructure;
		break;
	}

	case OpTypeRayQueryKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::RayQuery;
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);
		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
	// variable to emulate SSA Phi.
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID instead.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

		// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

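		// Literals wider than 32 bits span two words, with the low-order word first.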
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		ir.make_constant_null(id, type, true);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constant operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else if (undef_op)
				{
					// Undefined, just pick 0.
					remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
					remapped_constant_ops[i].constant_type = undef_op->basetype;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;
		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->cases.push_back({ ops[i], ops[i + 1] });

		// If we jump to next block, make it break instead since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpTerminateRayKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::TerminateRay;
		current_block = nullptr;
		break;

	case OpIgnoreIntersectionKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::IgnoreIntersection;
		current_block = nullptr;
		break;

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);

		// Don't add loop headers to continue blocks,
		// which would make it impossible to branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != BlockID(current_block->self))
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	case OpLine:
	{
		// OpLine might come at global scope, but we don't care about those since they will not be declared in any
		// meaningful correct order.
		// Ignore all OpLine directives which live outside a function.
		if (current_block)
			current_block->ops.push_back(instruction);

		// Line directives may arrive before the first OpLabel.
		// Treat this as the line of the function declaration,
		// so warnings for arguments can propagate properly.
		if (current_function)
		{
			// Store the first one we find and emit it before creating the function prototype.
			if (current_function->entry_line.file_id == 0)
			{
				current_function->entry_line.file_id = ops[0];
				current_function->entry_line.line_literal = ops[1];
			}
		}
		break;
	}

	case OpNoLine:
	{
		// OpNoLine might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}

bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
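	// Structural equivalence check used for struct de-aliasing: base type, width,
	// vector/matrix shape, array sizes, image parameters and all member types must
	// match recursively. Names and decorations are compared by the caller.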
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}

bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
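	// A variable may alias other memory when it is an SSBO, a storage image or an
	// atomic counter and it does not carry the Restrict decoration.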
	auto &type = get<SPIRType>(v.basetype);

	auto *type_meta = ir.find_meta(type.self);

	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}
} // namespace SPIRV_CROSS_NAMESPACE