1 /*
2 * Copyright (c) 2015-2021 The Khronos Group Inc.
3 * Copyright (c) 2015-2021 Valve Corporation
4 * Copyright (c) 2015-2021 LunarG, Inc.
5 * Copyright (c) 2015-2021 Google, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Author: Chia-I Wu <olvaffe@gmail.com>
14 * Author: Chris Forbes <chrisf@ijw.co.nz>
15 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
16 * Author: Mark Lobodzinski <mark@lunarg.com>
17 * Author: Mike Stroyan <mike@LunarG.com>
18 * Author: Tobin Ehlis <tobine@google.com>
19 * Author: Tony Barbour <tony@LunarG.com>
20 * Author: Cody Northrop <cnorthrop@google.com>
21 * Author: Dave Houlton <daveh@lunarg.com>
22 * Author: Jeremy Kniager <jeremyk@lunarg.com>
23 * Author: Shannon McPherson <shannon@lunarg.com>
24 * Author: John Zulauf <jzulauf@lunarg.com>
25 */
26
27 #include "../layer_validation_tests.h"
28 #include "vk_extension_helper.h"
29
30 #include <algorithm>
31 #include <array>
32 #include <chrono>
33 #include <memory>
34 #include <mutex>
35 #include <thread>
36
37 #include "cast_utils.h"
38
39 //
40 // POSITIVE VALIDATION TESTS
41 //
// These tests do not expect to encounter ANY validation errors; they pass only if that is true
43
TEST_F(VkPositiveLayerTest, ShaderRelaxedBlockLayout) {
    // Positive test: no validation errors are expected.
    // Exercises a shader whose block layout is only legal once the relaxed
    // block layout rules are in effect.
    TEST_DESCRIPTION("Create a shader that requires relaxed block layout.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Relaxed block layout was promoted to core in Vulkan 1.1, but a 1.0
    // device may still expose it as an extension, so request it when present.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring relaxed layout.  Without it, validation would
    // report something like:
    //   "Structure id 2 decorated as Block for variable in Uniform storage class
    //    must follow standard uniform buffer layout rules: member 1 at offset 4
    //    is not aligned to 16"
    const std::string relaxed_layout_source = R"(
               OpCapability Shader
               OpMemoryModel Logical GLSL450
               OpEntryPoint Vertex %main "main"
               OpSource GLSL 450
               OpMemberDecorate %S 0 Offset 0
               OpMemberDecorate %S 1 Offset 4
               OpDecorate %S Block
               OpDecorate %B DescriptorSet 0
               OpDecorate %B Binding 0
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
      %float = OpTypeFloat 32
    %v3float = OpTypeVector %float 3
          %S = OpTypeStruct %float %v3float
%_ptr_Uniform_S = OpTypePointer Uniform %S
          %B = OpVariable %_ptr_Uniform_S Uniform
       %main = OpFunction %void None %3
          %5 = OpLabel
               OpReturn
               OpFunctionEnd
        )";

    m_errorMonitor->ExpectSuccess();
    VkShaderObj vs(m_device, relaxed_layout_source, VK_SHADER_STAGE_VERTEX_BIT, this);
    m_errorMonitor->VerifyNotFound();
}
92
TEST_F(VkPositiveLayerTest, ShaderUboStd430Layout) {
    // This is a positive test, no errors expected
    // Verifies that a shader requiring std430 layout in a uniform buffer passes
    // validation once VK_KHR_uniform_buffer_standard_layout is enabled.
    TEST_DESCRIPTION("Create a shader that requires UBO std430 layout.");
    // Enable req'd extensions
    if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Check for the UBO standard block layout extension and turn it on if it's available
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME);

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");

    // Query actual device support.  Previously uniformBufferStandardLayout was
    // forced to VK_TRUE before the query (which immediately overwrote it) and
    // the result was never checked, so the test could run against a device
    // that does not support the feature.
    auto uniform_buffer_standard_layout_features = LvlInitStruct<VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR>(NULL);
    auto query_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&uniform_buffer_standard_layout_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &query_features2);

    // Skip when the feature is unsupported — mirrors the ShaderScalarBlockLayout test.
    if (uniform_buffer_standard_layout_features.uniformBufferStandardLayout != VK_TRUE) {
        printf("%s uniformBufferStandardLayout feature not supported\n", kSkipPrefix);
        return;
    }

    auto set_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&uniform_buffer_standard_layout_features);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &set_features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring std430 in a uniform buffer.
    // Without uniform buffer standard layout, we would expect a message like:
    // "Structure id 3 decorated as Block for variable in Uniform storage class
    // must follow standard uniform buffer layout rules: member 0 is an array
    // with stride 4 not satisfying alignment to 16"

    const std::string spv_source = R"(
               OpCapability Shader
               OpMemoryModel Logical GLSL450
               OpEntryPoint Vertex %main "main"
               OpSource GLSL 460
               OpDecorate %_arr_float_uint_8 ArrayStride 4
               OpMemberDecorate %foo 0 Offset 0
               OpDecorate %foo Block
               OpDecorate %b DescriptorSet 0
               OpDecorate %b Binding 0
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
      %float = OpTypeFloat 32
       %uint = OpTypeInt 32 0
     %uint_8 = OpConstant %uint 8
%_arr_float_uint_8 = OpTypeArray %float %uint_8
        %foo = OpTypeStruct %_arr_float_uint_8
%_ptr_Uniform_foo = OpTypePointer Uniform %foo
          %b = OpVariable %_ptr_Uniform_foo Uniform
       %main = OpFunction %void None %3
          %5 = OpLabel
               OpReturn
               OpFunctionEnd
        )";

    m_errorMonitor->ExpectSuccess();
    VkShaderObj::CreateFromASM(*m_device, *this, VK_SHADER_STAGE_VERTEX_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_0);
    m_errorMonitor->VerifyNotFound();
}
162
TEST_F(VkPositiveLayerTest, ShaderScalarBlockLayout) {
    // Positive test: no validation errors are expected.
    // Exercises a shader whose block layout is only legal under
    // VK_EXT_scalar_block_layout rules.
    TEST_DESCRIPTION("Create a shader that requires scalar block layout.");

    // vkGetPhysicalDeviceFeatures2KHR needs this instance extension.
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Enable the scalar block layout device extension when present.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");

    // Query actual device support for the feature before enabling it.
    auto scalar_features = LvlInitStruct<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(NULL);
    auto queried_features = LvlInitStruct<VkPhysicalDeviceFeatures2>(&scalar_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &queried_features);

    if (scalar_features.scalarBlockLayout != VK_TRUE) {
        printf("%s scalarBlockLayout feature not supported\n", kSkipPrefix);
        return;
    }

    auto enabled_features = LvlInitStruct<VkPhysicalDeviceFeatures2>(&scalar_features);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &enabled_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader requiring scalar layout.  Without it, validation would
    // report that member 1 at offset 4 is not aligned to 16 under standard
    // uniform buffer layout rules.
    const std::string scalar_layout_source = R"(
               OpCapability Shader
               OpMemoryModel Logical GLSL450
               OpEntryPoint Vertex %main "main"
               OpSource GLSL 450
               OpMemberDecorate %S 0 Offset 0
               OpMemberDecorate %S 1 Offset 4
               OpMemberDecorate %S 2 Offset 8
               OpDecorate %S Block
               OpDecorate %B DescriptorSet 0
               OpDecorate %B Binding 0
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
      %float = OpTypeFloat 32
    %v3float = OpTypeVector %float 3
          %S = OpTypeStruct %float %float %v3float
%_ptr_Uniform_S = OpTypePointer Uniform %S
          %B = OpVariable %_ptr_Uniform_S Uniform
       %main = OpFunction %void None %3
          %5 = OpLabel
               OpReturn
               OpFunctionEnd
        )";

    m_errorMonitor->ExpectSuccess();
    VkShaderObj vs(m_device, scalar_layout_source, VK_SHADER_STAGE_VERTEX_BIT, this);
    m_errorMonitor->VerifyNotFound();
}
233
TEST_F(VkPositiveLayerTest, ShaderNonSemanticInfo) {
    // Positive test: no validation errors are expected.
    // A shader may import a non-semantic extended instruction set once
    // VK_KHR_shader_non_semantic_info is enabled.
    TEST_DESCRIPTION("Create a shader that uses SPV_KHR_non_semantic_info.");
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Enable the extension when present; skip otherwise.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Compute shader importing a non-semantic extended instruction set.
    const std::string non_semantic_source = R"(
               OpCapability Shader
               OpExtension "SPV_KHR_non_semantic_info"
   %non_semantic = OpExtInstImport "NonSemantic.Validation.Test"
               OpMemoryModel Logical GLSL450
               OpEntryPoint GLCompute %main "main"
               OpExecutionMode %main LocalSize 1 1 1
       %void = OpTypeVoid
          %1 = OpExtInst %void %non_semantic 55 %void
       %func = OpTypeFunction %void
       %main = OpFunction %void None %func
          %2 = OpLabel
               OpReturn
               OpFunctionEnd
        )";

    m_errorMonitor->ExpectSuccess();
    VkShaderObj cs(m_device, non_semantic_source, VK_SHADER_STAGE_COMPUTE_BIT, this);
    m_errorMonitor->VerifyNotFound();
}
273
TEST_F(VkPositiveLayerTest, SpirvGroupDecorations) {
    TEST_DESCRIPTION("Test shader validation support for group decorations.");
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Compute shader that decorates its six storage buffers through
    // OpDecorationGroup / OpGroupDecorate / OpGroupMemberDecorate.
    const std::string spv_source = R"(
                  OpCapability Shader
                  OpMemoryModel Logical GLSL450
                  OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID
                  OpExecutionMode %main LocalSize 1 1 1
                  OpSource GLSL 430
                  OpName %main "main"
                  OpName %gl_GlobalInvocationID "gl_GlobalInvocationID"
                  OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId
                  OpDecorate %_runtimearr_float ArrayStride 4
                  OpDecorate %4 BufferBlock
                  OpDecorate %5 Offset 0
             %4 = OpDecorationGroup
             %5 = OpDecorationGroup
                  OpGroupDecorate %4 %_struct_6 %_struct_7 %_struct_8 %_struct_9 %_struct_10 %_struct_11
                  OpGroupMemberDecorate %5 %_struct_6 0 %_struct_7 0 %_struct_8 0 %_struct_9 0 %_struct_10 0 %_struct_11 0
                  OpDecorate %12 DescriptorSet 0
                  OpDecorate %13 DescriptorSet 0
                  OpDecorate %13 NonWritable
                  OpDecorate %13 Restrict
            %14 = OpDecorationGroup
            %12 = OpDecorationGroup
            %13 = OpDecorationGroup
                  OpGroupDecorate %12 %15
                  OpGroupDecorate %12 %15
                  OpGroupDecorate %12 %15
                  OpDecorate %15 DescriptorSet 0
                  OpDecorate %15 Binding 5
                  OpGroupDecorate %14 %16
                  OpDecorate %16 DescriptorSet 0
                  OpDecorate %16 Binding 0
                  OpGroupDecorate %12 %17
                  OpDecorate %17 Binding 1
                  OpGroupDecorate %13 %18 %19
                  OpDecorate %18 Binding 2
                  OpDecorate %19 Binding 3
                  OpGroupDecorate %14 %20
                  OpGroupDecorate %12 %20
                  OpGroupDecorate %13 %20
                  OpDecorate %20 Binding 4
          %bool = OpTypeBool
          %void = OpTypeVoid
            %23 = OpTypeFunction %void
          %uint = OpTypeInt 32 0
           %int = OpTypeInt 32 1
         %float = OpTypeFloat 32
        %v3uint = OpTypeVector %uint 3
       %v3float = OpTypeVector %float 3
%_ptr_Input_v3uint = OpTypePointer Input %v3uint
%_ptr_Uniform_int = OpTypePointer Uniform %int
%_ptr_Uniform_float = OpTypePointer Uniform %float
%_runtimearr_int = OpTypeRuntimeArray %int
%_runtimearr_float = OpTypeRuntimeArray %float
%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input
         %int_0 = OpConstant %int 0
     %_struct_6 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_6 = OpTypePointer Uniform %_struct_6
            %15 = OpVariable %_ptr_Uniform__struct_6 Uniform
     %_struct_7 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_7 = OpTypePointer Uniform %_struct_7
            %16 = OpVariable %_ptr_Uniform__struct_7 Uniform
     %_struct_8 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_8 = OpTypePointer Uniform %_struct_8
            %17 = OpVariable %_ptr_Uniform__struct_8 Uniform
     %_struct_9 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_9 = OpTypePointer Uniform %_struct_9
            %18 = OpVariable %_ptr_Uniform__struct_9 Uniform
    %_struct_10 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_10 = OpTypePointer Uniform %_struct_10
            %19 = OpVariable %_ptr_Uniform__struct_10 Uniform
    %_struct_11 = OpTypeStruct %_runtimearr_float
%_ptr_Uniform__struct_11 = OpTypePointer Uniform %_struct_11
            %20 = OpVariable %_ptr_Uniform__struct_11 Uniform
          %main = OpFunction %void None %23
            %40 = OpLabel
            %41 = OpLoad %v3uint %gl_GlobalInvocationID
            %42 = OpCompositeExtract %uint %41 0
            %43 = OpAccessChain %_ptr_Uniform_float %16 %int_0 %42
            %44 = OpAccessChain %_ptr_Uniform_float %17 %int_0 %42
            %45 = OpAccessChain %_ptr_Uniform_float %18 %int_0 %42
            %46 = OpAccessChain %_ptr_Uniform_float %19 %int_0 %42
            %47 = OpAccessChain %_ptr_Uniform_float %20 %int_0 %42
            %48 = OpAccessChain %_ptr_Uniform_float %15 %int_0 %42
            %49 = OpLoad %float %43
            %50 = OpLoad %float %44
            %51 = OpLoad %float %45
            %52 = OpLoad %float %46
            %53 = OpLoad %float %47
            %54 = OpFAdd %float %49 %50
            %55 = OpFAdd %float %54 %51
            %56 = OpFAdd %float %55 %52
            %57 = OpFAdd %float %56 %53
                  OpStore %48 %57
                  OpReturn
                  OpFunctionEnd
)";

    // CreateDescriptorSetLayout — one storage buffer per binding used by the shader.
    VkDescriptorSetLayoutBinding dslb[6] = {};
    size_t dslb_size = size(dslb);
    for (size_t i = 0; i < dslb_size; i++) {
        dslb[i].binding = static_cast<uint32_t>(i);  // explicit cast avoids size_t -> uint32_t narrowing warning
        dslb[i].descriptorCount = 1;
        dslb[i].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
        dslb[i].pImmutableSamplers = NULL;
        dslb[i].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_ALL;
    }
    if (m_device->props.limits.maxPerStageDescriptorStorageBuffers < dslb_size) {
        printf("%sNeeded storage buffer bindings exceeds this devices limit. Skipping tests.\n", kSkipPrefix);
        return;
    }

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.dsl_bindings_.resize(dslb_size);
    memcpy(pipe.dsl_bindings_.data(), dslb, dslb_size * sizeof(VkDescriptorSetLayoutBinding));
    // BUG FIX: use the group-decoration SPIR-V defined above.  It was declared
    // but unused — the pipeline was built from bindStateMinimalShaderText, so
    // the test never actually exercised group decorations.  Same ASM-source
    // VkShaderObj pattern as the other tests in this file.
    pipe.cs_.reset(new VkShaderObj(m_device, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, this));
    pipe.InitState();
    m_errorMonitor->ExpectSuccess();
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyNotFound();
}
402
TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension1of2) {
    // Positive test: no validation errors are expected.
    // Exercises a shader declaring a capability whose ID is shared by more
    // than one SPIR-V extension (non-unique capability ID), 1 of 2.
    TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 1 of 2");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Writing gl_ViewportIndex requires the multiViewport device feature.
    if (!m_device->phy().features().multiViewport) {
        printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader using the viewport-array capability.
    char const *viewport_layer_source = R"glsl(
        #version 450
        #extension GL_ARB_shader_viewport_layer_array : enable
        void main() {
            gl_ViewportIndex = 1;
        }
    )glsl";

    VkShaderObj vs(m_device, viewport_layer_source, VK_SHADER_STAGE_VERTEX_BIT, this);

    CreatePipelineHelper pipeline(*this);
    pipeline.InitInfo();
    pipeline.shader_stages_ = {vs.GetStageCreateInfo()};
    pipeline.InitState();
    m_errorMonitor->ExpectSuccess();
    pipeline.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
443
TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension2of2) {
    // Positive test: no validation errors are expected.
    // Exercises a shader declaring a capability whose ID is shared by more
    // than one SPIR-V extension (non-unique capability ID), 2 of 2.
    // NOTE(review): the comment below says SPV_EXT_shader_viewport_index_layer
    // is needed, yet the GLSL here is identical to the 1of2 test — confirm
    // whether this variant was meant to use a hand-written SPIR-V module that
    // explicitly declares that extension.
    TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 2 of 2");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // Need to use SPV_EXT_shader_viewport_index_layer
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Writing gl_ViewportIndex requires the multiViewport device feature.
    if (!m_device->phy().features().multiViewport) {
        printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader using the viewport-array capability.
    char const *viewport_layer_source = R"glsl(
        #version 450
        #extension GL_ARB_shader_viewport_layer_array : enable
        void main() {
            gl_ViewportIndex = 1;
        }
    )glsl";

    VkShaderObj vs(m_device, viewport_layer_source, VK_SHADER_STAGE_VERTEX_BIT, this);

    CreatePipelineHelper pipeline(*this);
    pipeline.InitInfo();
    pipeline.shader_stages_ = {vs.GetStageCreateInfo()};
    pipeline.InitState();
    m_errorMonitor->ExpectSuccess();
    pipeline.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
485
TEST_F(VkPositiveLayerTest, CreatePipelineFragmentOutputNotWrittenButMasked) {
    TEST_DESCRIPTION(
        "Test that no error is produced when the fragment shader fails to declare an output, but the corresponding attachment's "
        "write mask is 0.");
    m_errorMonitor->ExpectSuccess();

    ASSERT_NO_FATAL_FAILURE(Init());

    // Fragment shader that declares no color output at all.
    char const *frag_source = R"glsl(
        #version 450
        void main() {}
    )glsl";

    VkShaderObj vert(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj frag(m_device, frag_source, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipeline(m_device);
    pipeline.AddShader(&vert);
    pipeline.AddShader(&frag);

    /* set up CB 0, not written, but also masked */
    pipeline.AddDefaultColorAttachment(0);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetObj descriptor_set(m_device);
    descriptor_set.AppendDummy();
    descriptor_set.CreateVKDescriptorSet(m_commandBuffer);

    pipeline.CreateVKPipeline(descriptor_set.GetPipelineLayout(), renderPass());

    m_errorMonitor->VerifyNotFound();
}
518
TEST_F(VkPositiveLayerTest, PointSizeWriteInFunction) {
    TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize in vertex shader function.");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Vertex shader that declares PointSize and writes it from a function call,
    // paired with a minimal fragment shader.
    VkShaderObj vert_shader(m_device, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj frag_shader(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    {
        // Pipeline helper kept in its own scope, as in the original structure.
        CreatePipelineHelper helper(*this);
        helper.InitInfo();
        helper.shader_stages_ = {vert_shader.GetStageCreateInfo(), frag_shader.GetStageCreateInfo()};
        // Point-list topology is what makes the PointSize write relevant.
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.InitState();
        helper.CreateGraphicsPipeline();
    }
    m_errorMonitor->VerifyNotFound();
}
540
TEST_F(VkPositiveLayerTest, PointSizeGeomShaderSuccess) {
    TEST_DESCRIPTION(
        "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, and write in the final geometry stage.");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();

    // Requires geometry shaders plus PointSize writes from geometry stages.
    const auto &features = m_device->phy().features();
    if (!features.geometryShader || !features.shaderTessellationAndGeometryPointSize) {
        printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Vertex stage declares and writes PointSize; geometry stage writes the
    // final value before rasterization.
    VkShaderObj vert_shader(m_device, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj geom_shader(m_device, bindStateGeomPointSizeShaderText, VK_SHADER_STAGE_GEOMETRY_BIT, this);
    VkShaderObj frag_shader(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    CreatePipelineHelper helper(*this);
    helper.InitInfo();
    helper.shader_stages_ = {vert_shader.GetStageCreateInfo(), geom_shader.GetStageCreateInfo(), frag_shader.GetStageCreateInfo()};
    // Set Input Assembly to TOPOLOGY POINT LIST
    helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    helper.InitState();
    helper.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
569
TEST_F(VkPositiveLayerTest, LoosePointSizeWrite) {
    TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST and write PointSize outside of a structure.");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Hand-written vertex shader: PointSize is a loose Output variable
    // (decorated BuiltIn PointSize) rather than a member of a gl_PerVertex
    // block, and is stored to directly at the end of main.
    const std::string loose_point_size_source = R"(
                       OpCapability Shader
                  %1 = OpExtInstImport "GLSL.std.450"
                       OpMemoryModel Logical GLSL450
                       OpEntryPoint Vertex %main "main" %glposition %glpointsize %gl_VertexIndex
                       OpSource GLSL 450
                       OpName %main "main"
                       OpName %vertices "vertices"
                       OpName %glposition "glposition"
                       OpName %glpointsize "glpointsize"
                       OpName %gl_VertexIndex "gl_VertexIndex"
                       OpDecorate %glposition BuiltIn Position
                       OpDecorate %glpointsize BuiltIn PointSize
                       OpDecorate %gl_VertexIndex BuiltIn VertexIndex
               %void = OpTypeVoid
                  %3 = OpTypeFunction %void
              %float = OpTypeFloat 32
            %v2float = OpTypeVector %float 2
               %uint = OpTypeInt 32 0
             %uint_3 = OpConstant %uint 3
%_arr_v2float_uint_3 = OpTypeArray %v2float %uint_3
%_ptr_Private__arr_v2float_uint_3 = OpTypePointer Private %_arr_v2float_uint_3
           %vertices = OpVariable %_ptr_Private__arr_v2float_uint_3 Private
                %int = OpTypeInt 32 1
              %int_0 = OpConstant %int 0
           %float_n1 = OpConstant %float -1
                 %16 = OpConstantComposite %v2float %float_n1 %float_n1
%_ptr_Private_v2float = OpTypePointer Private %v2float
              %int_1 = OpConstant %int 1
            %float_1 = OpConstant %float 1
                 %21 = OpConstantComposite %v2float %float_1 %float_n1
              %int_2 = OpConstant %int 2
            %float_0 = OpConstant %float 0
                 %25 = OpConstantComposite %v2float %float_0 %float_1
            %v4float = OpTypeVector %float 4
%_ptr_Output_gl_Position = OpTypePointer Output %v4float
         %glposition = OpVariable %_ptr_Output_gl_Position Output
%_ptr_Output_gl_PointSize = OpTypePointer Output %float
        %glpointsize = OpVariable %_ptr_Output_gl_PointSize Output
     %_ptr_Input_int = OpTypePointer Input %int
     %gl_VertexIndex = OpVariable %_ptr_Input_int Input
              %int_3 = OpConstant %int 3
%_ptr_Output_v4float = OpTypePointer Output %v4float
  %_ptr_Output_float = OpTypePointer Output %float
               %main = OpFunction %void None %3
                  %5 = OpLabel
                 %18 = OpAccessChain %_ptr_Private_v2float %vertices %int_0
                       OpStore %18 %16
                 %22 = OpAccessChain %_ptr_Private_v2float %vertices %int_1
                       OpStore %22 %21
                 %26 = OpAccessChain %_ptr_Private_v2float %vertices %int_2
                       OpStore %26 %25
                 %33 = OpLoad %int %gl_VertexIndex
                 %35 = OpSMod %int %33 %int_3
                 %36 = OpAccessChain %_ptr_Private_v2float %vertices %35
                 %37 = OpLoad %v2float %36
                 %38 = OpCompositeExtract %float %37 0
                 %39 = OpCompositeExtract %float %37 1
                 %40 = OpCompositeConstruct %v4float %38 %39 %float_0 %float_1
                 %42 = OpAccessChain %_ptr_Output_v4float %glposition
                       OpStore %42 %40
                       OpStore %glpointsize %float_1
                       OpReturn
                       OpFunctionEnd
        )";

    // Create VS declaring PointSize and write to it in a function call.
    VkShaderObj vert_shader(m_device, loose_point_size_source, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj frag_shader(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    {
        // Pipeline helper kept in its own scope, as in the original structure.
        CreatePipelineHelper helper(*this);
        helper.InitInfo();
        helper.shader_stages_ = {vert_shader.GetStageCreateInfo(), frag_shader.GetStageCreateInfo()};
        // Set Input Assembly to TOPOLOGY POINT LIST
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.InitState();
        helper.CreateGraphicsPipeline();
    }
    m_errorMonitor->VerifyNotFound();
}
659
TEST_F(VkPositiveLayerTest, ShaderDrawParametersWithoutFeature) {
    TEST_DESCRIPTION("Use VK_KHR_shader_draw_parameters in 1.0 before shaderDrawParameters feature was added");
    m_errorMonitor->ExpectSuccess();

    SetTargetApiVersion(VK_API_VERSION_1_0);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // On a 1.0 device the draw-parameters built-ins come from the extension.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME)) {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // This test targets Vulkan 1.0 exactly; newer devices take another path.
    if (DeviceValidationVersion() != VK_API_VERSION_1_0) {
        printf("%s Tests requires Vulkan 1.0 exactly, skipping test\n", kSkipPrefix);
        return;
    }

    char const *base_vertex_source = R"glsl(
        #version 460
        void main(){
           gl_Position = vec4(float(gl_BaseVertex));
        }
    )glsl";
    VkShaderObj vs(*m_device, VK_SHADER_STAGE_VERTEX_BIT);

    // Only run the pipeline test when GLSL -> SPIR-V compilation succeeded.
    if (vs.InitFromGLSLTry(*this, base_vertex_source) == VK_SUCCESS) {
        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        };
        CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit | kWarningBit, "", true);
    }

    m_errorMonitor->VerifyNotFound();
}
696
TEST_F(VkPositiveLayerTest, ShaderDrawParametersWithoutFeature11) {
    TEST_DESCRIPTION("Use VK_KHR_shader_draw_parameters in 1.1 using the extension");
    m_errorMonitor->ExpectSuccess();

    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
        return;
    }

    // Request the draw-parameters extension; skip when unavailable.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME)) {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *base_vertex_source = R"glsl(
        #version 460
        void main(){
           gl_Position = vec4(float(gl_BaseVertex));
        }
    )glsl";
    VkShaderObj vs(*m_device, VK_SHADER_STAGE_VERTEX_BIT);

    // Target SPIR-V 1.3, where the capability is core and the module needs no
    // explicit extension declaration.
    if (vs.InitFromGLSLTry(*this, base_vertex_source, false, SPV_ENV_VULKAN_1_1) == VK_SUCCESS) {
        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        };
        CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit | kWarningBit, "", true);
    }

    m_errorMonitor->VerifyNotFound();
}
736
TEST_F(VkPositiveLayerTest, ShaderDrawParametersWithFeature) {
    TEST_DESCRIPTION("Use VK_KHR_shader_draw_parameters in 1.2 with feature bit enabled");
    m_errorMonitor->ExpectSuccess();

    // use 1.2 to get the feature bit in VkPhysicalDeviceVulkan11Features
    SetTargetApiVersion(VK_API_VERSION_1_2);

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Devsim won't read in values like maxDescriptorSetUpdateAfterBindUniformBuffers which cause OneshotTest to fail pipeline
    // layout creation if using 1.2 devsim as it enables VK_EXT_descriptor_indexing
    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        // BUG FIX: message previously read "Not suppored" (typo).
        printf("%sNot supported by MockICD, skipping tests\n", kSkipPrefix);
        return;
    }

    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Tests requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }

    // Query actual support.  The previous pre-query assignment of
    // shaderDrawParameters = VK_TRUE was dead — GetPhysicalDeviceFeatures2
    // overwrites the struct — so it has been removed.
    auto features11 = LvlInitStruct<VkPhysicalDeviceVulkan11Features>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features11);

    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);

    if (features11.shaderDrawParameters != VK_TRUE) {
        // BUG FIX: include kSkipPrefix so this skip message is recognized like
        // every other skip in this file.
        printf("%s shaderDrawParameters not supported, skipping test\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource = R"glsl(
        #version 460
        void main(){
           gl_Position = vec4(float(gl_BaseVertex));
        }
    )glsl";
    VkShaderObj vs(*m_device, VK_SHADER_STAGE_VERTEX_BIT);

    // make sure using SPIR-V 1.3 as extension is core and not needed in Vulkan then
    if (VK_SUCCESS == vs.InitFromGLSLTry(*this, vsSource, false, SPV_ENV_VULKAN_1_1)) {
        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        };
        CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit | kWarningBit, "", true);
    }

    m_errorMonitor->VerifyNotFound();
}
790
TEST_F(VkPositiveLayerTest, ShaderImageAtomicInt64) {
    // Positive test: compute pipelines using 64-bit image atomics must create cleanly
    // when both shaderInt64 and shaderImageInt64Atomics features are enabled.
    TEST_DESCRIPTION("Test VK_EXT_shader_image_atomic_int64.");
    SetTargetApiVersion(VK_API_VERSION_1_1);

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Query the extension feature struct chained under VkPhysicalDeviceFeatures2KHR.
    auto image_atomic_int64_features = lvl_init_struct<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&image_atomic_int64_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    // Both features are required for u64image2D atomics; skip if either is absent.
    if (features2.features.shaderInt64 == VK_FALSE) {
        printf("%s shaderInt64 feature not supported, skipping tests\n", kSkipPrefix);
        return;
    } else if (image_atomic_int64_features.shaderImageInt64Atomics == VK_FALSE) {
        printf("%s shaderImageInt64Atomics feature not supported, skipping tests\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    // SPIR-V 1.3 (for SPV_KHR_storage_buffer_storage_class) needs a 1.1 device.
    if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        printf("%s At least Vulkan version 1.1 is required for SPIR-V 1.3, skipping test.\n", kSkipPrefix);
        return;
    }

    // Common prelude shared by all four compute shaders: an SSBO holding a uint64_t
    // and an r64ui storage image; each variant appends one atomic op plus closing brace.
    // clang-format off
    std::string cs_image_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
        #extension GL_EXT_shader_image_int64 : enable
        #extension GL_KHR_memory_scope_semantics : enable
        layout(set = 0, binding = 0) buffer ssbo { uint64_t y; };
        layout(set = 0, binding = 1, r64ui) uniform u64image2D z;
        void main() {
    )glsl";

    std::string cs_image_load = cs_image_base + R"glsl(
           y = imageAtomicLoad(z, ivec2(1, 1), gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_image_store = cs_image_base + R"glsl(
           imageAtomicStore(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_image_exchange = cs_image_base + R"glsl(
           imageAtomicExchange(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_image_add = cs_image_base + R"glsl(
           y = imageAtomicAdd(z, ivec2(1, 1), y);
        }
    )glsl";
    // clang-format on

    // current_shader is rebound before each OneshotTest; the lambda captures it by reference.
    const char *current_shader = nullptr;
    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class
        helper.cs_.reset(new VkShaderObj(m_device, current_shader, VK_SHADER_STAGE_COMPUTE_BIT, this, "main", false, nullptr,
                                         SPV_ENV_VULKAN_1_1));
        helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}};
    };

    // shaderImageInt64Atomics
    current_shader = cs_image_load.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

    current_shader = cs_image_store.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

    current_shader = cs_image_exchange.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

    current_shader = cs_image_add.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
}
888
TEST_F(VkPositiveLayerTest, ShaderAtomicFloat) {
    // Positive test: float atomic operations (buffer, shared-memory, and image variants)
    // must create pipelines without validation errors when the matching
    // VkPhysicalDeviceShaderAtomicFloatFeaturesEXT feature bit is enabled.
    TEST_DESCRIPTION("Test VK_EXT_shader_atomic_float.");
    SetTargetApiVersion(VK_API_VERSION_1_1);

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Query the per-operation atomic-float feature bits; individual sub-tests below are
    // gated on the bits the device actually reports.
    auto atomic_float_features = lvl_init_struct<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&atomic_float_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    // SPIR-V 1.3 (for SPV_KHR_storage_buffer_storage_class) needs a 1.1 device.
    if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        printf("%s At least Vulkan version 1.1 is required for SPIR-V 1.3, skipping test.\n", kSkipPrefix);
        return;
    }

    // Shader matrix: {float32, float64} x {buffer, shared} x {add, load, store, exchange},
    // plus float32 image atomics. Each base declares a shared variable `x` and an SSBO
    // member `y`; each variant appends one atomic op and the closing brace.
    // NOTE(review): the shared-memory load/store variants pass gl_StorageSemanticsBuffer
    // (not gl_StorageSemanticsShared), and the shared exchange variants declare an unused
    // local `a` — presumably copy-paste leftovers from the buffer variants; harmless for
    // pipeline-creation validation but worth confirming upstream.
    // clang-format off
    std::string cs_32_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float : enable
        #extension GL_KHR_memory_scope_semantics : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float32 : enable
        shared float32_t x;
        layout(set = 0, binding = 0) buffer ssbo { float32_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_32_add = cs_32_base + R"glsl(
           atomicAdd(y, 1);
        }
    )glsl";

    std::string cs_buffer_float_32_load = cs_32_base + R"glsl(
           y = 1 + atomicLoad(y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_buffer_float_32_store = cs_32_base + R"glsl(
           float32_t a = 1;
           atomicStore(y, a, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_buffer_float_32_exchange = cs_32_base + R"glsl(
           float32_t a = 1;
           atomicExchange(y, a);
        }
    )glsl";

    std::string cs_shared_float_32_add = cs_32_base + R"glsl(
           y = atomicAdd(x, 1);
        }
    )glsl";

    std::string cs_shared_float_32_load = cs_32_base + R"glsl(
           y = 1 + atomicLoad(x, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_shared_float_32_store = cs_32_base + R"glsl(
           atomicStore(x, y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_shared_float_32_exchange = cs_32_base + R"glsl(
           float32_t a = 1;
           atomicExchange(x, y);
        }
    )glsl";

    std::string cs_64_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float : enable
        #extension GL_KHR_memory_scope_semantics : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float64 : enable
        shared float64_t x;
        layout(set = 0, binding = 0) buffer ssbo { float64_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_64_add = cs_64_base + R"glsl(
           atomicAdd(y, 1);
        }
    )glsl";

    std::string cs_buffer_float_64_load = cs_64_base + R"glsl(
           y = 1 + atomicLoad(y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_buffer_float_64_store = cs_64_base + R"glsl(
           float64_t a = 1;
           atomicStore(y, a, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_buffer_float_64_exchange = cs_64_base + R"glsl(
           float64_t a = 1;
           atomicExchange(y, a);
        }
    )glsl";

    std::string cs_shared_float_64_add = cs_64_base + R"glsl(
           y = atomicAdd(x, 1);
        }
    )glsl";

    std::string cs_shared_float_64_load = cs_64_base + R"glsl(
           y = 1 + atomicLoad(x, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_shared_float_64_store = cs_64_base + R"glsl(
           atomicStore(x, y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_shared_float_64_exchange = cs_64_base + R"glsl(
           float64_t a = 1;
           atomicExchange(x, y);
        }
    )glsl";

    std::string cs_image_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float : enable
        #extension GL_KHR_memory_scope_semantics : enable
        layout(set = 0, binding = 0) buffer ssbo { float y; };
        layout(set = 0, binding = 1, r32f) uniform image2D z;
        void main() {
    )glsl";

    std::string cs_image_load = cs_image_base + R"glsl(
           y = imageAtomicLoad(z, ivec2(1, 1), gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_image_store = cs_image_base + R"glsl(
           imageAtomicStore(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_image_exchange = cs_image_base + R"glsl(
           imageAtomicExchange(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_image_add = cs_image_base + R"glsl(
           y = imageAtomicAdd(z, ivec2(1, 1), y);
        }
    )glsl";
    // clang-format on

    // current_shader/current_bindings are rebound before each OneshotTest; the lambda
    // captures them by reference.
    const char *current_shader = nullptr;
    // set binding for buffer tests
    std::vector<VkDescriptorSetLayoutBinding> current_bindings = {
        {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};

    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class
        helper.cs_.reset(new VkShaderObj(m_device, current_shader, VK_SHADER_STAGE_COMPUTE_BIT, this, "main", false, nullptr,
                                         SPV_ENV_VULKAN_1_1));
        helper.dsl_bindings_ = current_bindings;
    };

    if (atomic_float_features.shaderBufferFloat32Atomics == VK_TRUE) {
        current_shader = cs_buffer_float_32_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_buffer_float_32_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_buffer_float_32_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    if (atomic_float_features.shaderBufferFloat32AtomicAdd == VK_TRUE) {
        current_shader = cs_buffer_float_32_add.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    // 64-bit variants additionally require the core shaderFloat64 feature.
    if (features2.features.shaderFloat64 == VK_TRUE) {
        if (atomic_float_features.shaderBufferFloat64Atomics == VK_TRUE) {
            current_shader = cs_buffer_float_64_load.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_buffer_float_64_store.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_buffer_float_64_exchange.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float_features.shaderBufferFloat64AtomicAdd == VK_TRUE) {
            current_shader = cs_buffer_float_64_add.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }
    }

    if (atomic_float_features.shaderSharedFloat32Atomics == VK_TRUE) {
        current_shader = cs_shared_float_32_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_shared_float_32_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_shared_float_32_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    if (atomic_float_features.shaderSharedFloat32AtomicAdd == VK_TRUE) {
        current_shader = cs_shared_float_32_add.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    if (features2.features.shaderFloat64 == VK_TRUE) {
        if (atomic_float_features.shaderSharedFloat64Atomics == VK_TRUE) {
            current_shader = cs_shared_float_64_load.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_shared_float_64_store.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_shared_float_64_exchange.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float_features.shaderSharedFloat64AtomicAdd == VK_TRUE) {
            current_shader = cs_shared_float_64_add.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }
    }

    // Add binding for images
    current_bindings.push_back({1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr});

    if (atomic_float_features.shaderImageFloat32Atomics == VK_TRUE) {
        current_shader = cs_image_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_image_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_image_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    if (atomic_float_features.shaderImageFloat32AtomicAdd == VK_TRUE) {
        current_shader = cs_image_add.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }
}
1160
TEST_F(VkPositiveLayerTest, ShaderAtomicFloat2) {
    // Positive test: float16 add/min/max/load/store/exchange plus float32/float64 min/max
    // atomics (buffer, shared, and image variants) must create pipelines without
    // validation errors when the matching VK_EXT_shader_atomic_float2 feature is enabled.
    TEST_DESCRIPTION("Test VK_EXT_shader_atomic_float2.");
    SetTargetApiVersion(VK_API_VERSION_1_2);

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Test requires Vulkan >= 1.2.\n", kSkipPrefix);
        return;
    }

    // VK_EXT_shader_atomic_float2 requires VK_EXT_shader_atomic_float, so enabling the
    // base extension alongside it is valid whenever float2 is reported.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME);
        return;
    }

    // Chain every feature struct the sub-tests gate on: float atomics (v1 and v2),
    // shaderFloat16, and 16-bit storage for the float16_t SSBO member.
    auto atomic_float_features = lvl_init_struct<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT>();
    auto atomic_float2_features = lvl_init_struct<VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT>(&atomic_float_features);
    auto float16int8_features = LvlInitStruct<VkPhysicalDeviceShaderFloat16Int8Features>(&atomic_float2_features);
    auto storage_16_bit_features = LvlInitStruct<VkPhysicalDevice16BitStorageFeatures>(&float16int8_features);
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&storage_16_bit_features);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    // Shader matrix: {float16, float32, float64} x {buffer, shared} x the ops each width
    // supports, plus float32 image min/max. Each base declares a shared variable `x` and
    // an SSBO member `y`; each variant appends one atomic op and the closing brace.
    // clang-format off
    std::string cs_16_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float16 : enable
        #extension GL_EXT_shader_16bit_storage: enable
        #extension GL_KHR_memory_scope_semantics : enable
        shared float16_t x;
        layout(set = 0, binding = 0) buffer ssbo { float16_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_16_add = cs_16_base + R"glsl(
           atomicAdd(y, float16_t(1.0));
        }
    )glsl";

    std::string cs_buffer_float_16_load = cs_16_base + R"glsl(
           y = float16_t(1.0) + atomicLoad(y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_buffer_float_16_store = cs_16_base + R"glsl(
           float16_t a = float16_t(1.0);
           atomicStore(y, a, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_buffer_float_16_exchange = cs_16_base + R"glsl(
           float16_t a = float16_t(1.0);
           atomicExchange(y, a);
        }
    )glsl";

    std::string cs_buffer_float_16_min = cs_16_base + R"glsl(
           atomicMin(y, float16_t(1.0));
        }
    )glsl";

    std::string cs_buffer_float_16_max = cs_16_base + R"glsl(
           atomicMax(y, float16_t(1.0));
        }
    )glsl";

    std::string cs_shared_float_16_add = cs_16_base + R"glsl(
           y = atomicAdd(x, float16_t(1.0));
        }
    )glsl";

    std::string cs_shared_float_16_load = cs_16_base + R"glsl(
           y = float16_t(1.0) + atomicLoad(x, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_shared_float_16_store = cs_16_base + R"glsl(
           atomicStore(x, y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";

    std::string cs_shared_float_16_exchange = cs_16_base + R"glsl(
           float16_t a = float16_t(1.0);
           atomicExchange(x, y);
        }
    )glsl";

    std::string cs_shared_float_16_min = cs_16_base + R"glsl(
           y = atomicMin(x, float16_t(1.0));
        }
    )glsl";

    std::string cs_shared_float_16_max = cs_16_base + R"glsl(
           y = atomicMax(x, float16_t(1.0));
        }
    )glsl";

    std::string cs_32_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float32 : enable
        shared float32_t x;
        layout(set = 0, binding = 0) buffer ssbo { float32_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_32_min = cs_32_base + R"glsl(
           atomicMin(y, 1);
        }
    )glsl";

    std::string cs_buffer_float_32_max = cs_32_base + R"glsl(
           atomicMax(y, 1);
        }
    )glsl";

    std::string cs_shared_float_32_min = cs_32_base + R"glsl(
           y = atomicMin(x, 1);
        }
    )glsl";

    std::string cs_shared_float_32_max = cs_32_base + R"glsl(
           y = atomicMax(x, 1);
        }
    )glsl";

    std::string cs_64_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float64 : enable
        shared float64_t x;
        layout(set = 0, binding = 0) buffer ssbo { float64_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_64_min = cs_64_base + R"glsl(
           atomicMin(y, 1);
        }
    )glsl";

    std::string cs_buffer_float_64_max = cs_64_base + R"glsl(
           atomicMax(y, 1);
        }
    )glsl";

    std::string cs_shared_float_64_min = cs_64_base + R"glsl(
           y = atomicMin(x, 1);
        }
    )glsl";

    std::string cs_shared_float_64_max = cs_64_base + R"glsl(
           y = atomicMax(x, 1);
        }
    )glsl";

    std::string cs_image_32_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        layout(set = 0, binding = 0) buffer ssbo { float y; };
        layout(set = 0, binding = 1, r32f) uniform image2D z;
        void main() {
    )glsl";

    std::string cs_image_32_min = cs_image_32_base + R"glsl(
           y = imageAtomicMin(z, ivec2(1, 1), y);
        }
    )glsl";

    std::string cs_image_32_max = cs_image_32_base + R"glsl(
           y = imageAtomicMax(z, ivec2(1, 1), y);
        }
    )glsl";
    // clang-format on

    // current_shader/current_bindings are rebound before each OneshotTest; the lambda
    // captures them by reference.
    const char *current_shader = nullptr;
    // set binding for buffer tests
    std::vector<VkDescriptorSetLayoutBinding> current_bindings = {
        {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};

    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        // This could get triggered in the event that the shader fails to compile
        m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091");
        // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class
        helper.cs_ = VkShaderObj::CreateFromGLSL(*m_device, *this, VK_SHADER_STAGE_COMPUTE_BIT, current_shader, "main", nullptr,
                                                 SPV_ENV_VULKAN_1_1);
        // Skip the test if shader failed to compile
        helper.override_skip_ = !static_cast<bool>(helper.cs_);
        helper.dsl_bindings_ = current_bindings;
    };

    // float16 variants need shaderFloat16 (for arithmetic) and storageBuffer16BitAccess
    // (for the float16_t SSBO member) on top of the per-op atomic feature bits.
    if (float16int8_features.shaderFloat16 == VK_TRUE && storage_16_bit_features.storageBuffer16BitAccess == VK_TRUE) {
        if (atomic_float2_features.shaderBufferFloat16Atomics == VK_TRUE) {
            current_shader = cs_buffer_float_16_load.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_buffer_float_16_store.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_buffer_float_16_exchange.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float2_features.shaderBufferFloat16AtomicAdd == VK_TRUE) {
            current_shader = cs_buffer_float_16_add.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float2_features.shaderBufferFloat16AtomicMinMax == VK_TRUE) {
            current_shader = cs_buffer_float_16_min.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_buffer_float_16_max.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float2_features.shaderSharedFloat16Atomics == VK_TRUE) {
            current_shader = cs_shared_float_16_load.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_shared_float_16_store.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_shared_float_16_exchange.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float2_features.shaderSharedFloat16AtomicAdd == VK_TRUE) {
            current_shader = cs_shared_float_16_add.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float2_features.shaderSharedFloat16AtomicMinMax == VK_TRUE) {
            current_shader = cs_shared_float_16_min.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_shared_float_16_max.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }
    }

    if (atomic_float2_features.shaderBufferFloat32AtomicMinMax == VK_TRUE) {
        current_shader = cs_buffer_float_32_min.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_buffer_float_32_max.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    if (atomic_float2_features.shaderSharedFloat32AtomicMinMax == VK_TRUE) {
        current_shader = cs_shared_float_32_min.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_shared_float_32_max.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }

    // float64 min/max additionally require the core shaderFloat64 feature.
    if (features2.features.shaderFloat64 == VK_TRUE) {
        if (atomic_float2_features.shaderBufferFloat64AtomicMinMax == VK_TRUE) {
            current_shader = cs_buffer_float_64_min.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_buffer_float_64_max.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }

        if (atomic_float2_features.shaderSharedFloat64AtomicMinMax == VK_TRUE) {
            current_shader = cs_shared_float_64_min.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

            current_shader = cs_shared_float_64_max.c_str();
            CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }
    }

    // Add binding for images
    current_bindings.push_back({1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr});

    // Bug fix: the image min/max shaders are gated by shaderImageFloat32AtomicMinMax,
    // not the shared-memory bit (shaderSharedFloat32AtomicMinMax) used previously —
    // a device with only the shared bit would have run image shaders it cannot support.
    if (atomic_float2_features.shaderImageFloat32AtomicMinMax == VK_TRUE) {
        current_shader = cs_image_32_min.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);

        current_shader = cs_image_32_max.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
    }
}
1452
TEST_F(VkPositiveLayerTest, ValidateComputeShaderSharedMemory) {
    // Positive test: a compute shader with a modest amount of shared memory (scalars,
    // vectors, matrices, arrays, and a struct) creates a pipeline with no errors.
    TEST_DESCRIPTION("Validate compute shader shared memory does not exceed maxComputeSharedMemorySize");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Make sure compute pipeline has a compute shader stage set
    char const *compute_source = R"glsl(
        #version 450
        shared uint a;
        shared float b;
        shared vec2 c;
        shared mat3 d;
        shared mat4 e[3];
        struct A {
            int f;
            float g;
            uint h;
        };
        shared A f;
        void main(){
        }
    )glsl";

    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(m_device, compute_source, VK_SHADER_STAGE_COMPUTE_BIT, this));
    cs_pipeline.InitState();
    // No validation messages expected for this pipeline creation.
    m_errorMonitor->ExpectSuccess();
    cs_pipeline.CreateComputePipeline();
    m_errorMonitor->VerifyNotFound();
}
1484
TEST_F(VkPositiveLayerTest, TestShaderInputAndOutputComponents) {
    // Positive test: a vertex/fragment pair whose interface variables are packed into
    // shared locations via explicit `component` qualifiers (including an input block and
    // an array that straddles two locations) must link with no errors or warnings.
    TEST_DESCRIPTION("Test shader layout in and out with different components.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource = R"glsl(
                #version 450

                layout(location = 0, component = 0) out vec2 rg;
                layout(location = 0, component = 2) out float b;

                layout(location = 1, component = 0) out float r;
                layout(location = 1, component = 1) out vec3 gba;

                layout(location = 2) out vec4 out_color_0;
                layout(location = 3) out vec4 out_color_1;

                layout(location = 4, component = 0) out float x;
                layout(location = 4, component = 1) out vec2 yz;
                layout(location = 4, component = 3) out float w;

                layout(location = 5, component = 0) out vec3 stp;
                layout(location = 5, component = 3) out float q;

                layout(location = 6, component = 0) out vec2 cd;
                layout(location = 6, component = 2) out float e;
                layout(location = 6, component = 3) out float f;

                layout(location = 7, component = 0) out float ar1;
                layout(location = 7, component = 1) out float ar2[2];
                layout(location = 7, component = 3) out float ar3;

                void main() {
                        vec2 xy = vec2((gl_VertexIndex >> 1u) & 1u, gl_VertexIndex & 1u);
                        gl_Position = vec4(xy, 0.0f, 1.0f);
                        out_color_0 = vec4(1.0f, 0.0f, 1.0f, 0.0f);
                        out_color_1 = vec4(0.0f, 1.0f, 0.0f, 1.0f);
                        rg = vec2(0.25f, 0.75f);
                        b = 0.5f;
                        r = 0.75f;
                        gba = vec3(1.0f);
                        x = 1.0f;
                        yz = vec2(0.25f);
                        w = 0.5f;
                        stp = vec3(1.0f);
                        q = 0.1f;
                        ar1 = 1.0f;
                        ar2[0] = 0.5f;
                        ar2[1] = 0.75f;
                        ar3 = 1.0f;
                }
            )glsl";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);

    // The fragment inputs re-slice the vertex outputs differently (e.g. location 0's
    // vec2+float becomes float+vec2; locations 2-3 are consumed as an input block with
    // per-member location/component overrides) — still a legal interface match.
    char const *fsSource = R"glsl(
                #version 450

                layout(location = 0, component = 0) in float r;
                layout(location = 0, component = 1) in vec2 gb;

                layout(location = 1, component = 0) in float r1;
                layout(location = 1, component = 1) in float g1;
                layout(location = 1, component = 2) in float b1;
                layout(location = 1, component = 3) in float a1;

                layout(location = 2) in InputBlock {
                        layout(location = 3, component = 3) float one_alpha;
                        layout(location = 2, component = 3) float zero_alpha;
                        layout(location = 3, component = 2) float one_blue;
                        layout(location = 2, component = 2) float zero_blue;
                        layout(location = 3, component = 1) float one_green;
                        layout(location = 2, component = 1) float zero_green;
                        layout(location = 3, component = 0) float one_red;
                        layout(location = 2, component = 0) float zero_red;
                } inBlock;

                layout(location = 4, component = 0) in vec2 xy;
                layout(location = 4, component = 2) in vec2 zw;

                layout(location = 5, component = 0) in vec2 st;
                layout(location = 5, component = 2) in vec2 pq;

                layout(location = 6, component = 0) in vec4 cdef;

                layout(location = 7, component = 0) in float ar1;
                layout(location = 7, component = 1) in float ar2;
                layout(location = 8, component = 1) in float ar3;
                layout(location = 7, component = 3) in float ar4;

                layout (location = 0) out vec4 color;

                void main() {
                        color = vec4(r, gb, 1.0f) *
                                vec4(r1, g1, 1.0f, a1) *
                                vec4(inBlock.zero_red, inBlock.zero_green, inBlock.zero_blue, inBlock.zero_alpha) *
                                vec4(inBlock.one_red, inBlock.one_green, inBlock.one_blue, inBlock.one_alpha) *
                                vec4(xy, zw) * vec4(st, pq) * cdef * vec4(ar1, ar2, ar3, ar4);
                }
            )glsl";
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    // Performance warnings are included in the mask to also catch spurious interface-mismatch warnings.
    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kPerformanceWarningBit | kErrorBit, "", true);
}
1592
TEST_F(VkPositiveLayerTest, MeshShaderPointSize) {
    TEST_DESCRIPTION("Test writing point size in a mesh shader.");

    // VK_NV_mesh_shader requires VK_KHR_get_physical_device_properties2 at the instance level.
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework());
    std::array<const char *, 2> required_device_extensions = {
        {VK_NV_MESH_SHADER_EXTENSION_NAME, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        // Fixed skip message: "suppored" -> "supported", and added the space after the
        // skip prefix that every other skip message in this file has.
        printf("%s Not supported by MockICD or devsim, skipping tests\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Create a device that enables mesh_shader
    auto mesh_shader_features = LvlInitStruct<VkPhysicalDeviceMeshShaderFeaturesNV>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&mesh_shader_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    if (mesh_shader_features.meshShader != VK_TRUE) {
        printf("%s Mesh shader feature not supported\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Mesh shader emitting a single point; writing gl_PointSize from the mesh stage is the
    // behavior under test and must not trigger any validation error.
    static const char meshShaderText[] = R"glsl(
        #version 460
        #extension GL_NV_mesh_shader : enable
        layout (local_size_x=1) in;
        layout (points) out;
        layout (max_vertices=1, max_primitives=1) out;
        void main ()
        {
            gl_PrimitiveCountNV = 1u;
            gl_PrimitiveIndicesNV[0] = 0;
            gl_MeshVerticesNV[0].gl_Position = vec4(-0.5, -0.5, 0.0, 1.0);
            gl_MeshVerticesNV[0].gl_PointSize = 4;
        }
    )glsl";

    VkShaderObj ms(m_device, meshShaderText, VK_SHADER_STAGE_MESH_BIT_NV, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    CreatePipelineHelper helper(*this);
    helper.InitInfo();
    helper.shader_stages_ = {ms.GetStageCreateInfo(), fs.GetStageCreateInfo()};

    // Ensure pVertexInputState and pInputAssembly state are null, as these should be ignored.
    helper.gp_ci_.pVertexInputState = nullptr;
    helper.gp_ci_.pInputAssemblyState = nullptr;

    helper.InitState();

    m_errorMonitor->ExpectSuccess();
    helper.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
1669
TEST_F(VkPositiveLayerTest, TestShaderInputAndOutputStructComponents) {
    TEST_DESCRIPTION("Test shader interface with structs.");

    ASSERT_NO_FATAL_FAILURE(Init());

    // There is a crash inside the driver on S10
    if (IsPlatform(kGalaxyS10)) {
        printf("%s This test does not currently run on Galaxy S10\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex stage writes an array of structs across locations 0..2.
    char const *vertex_source = R"glsl(
        #version 450

        struct R {
            vec4 rgba;
        };

        layout(location = 0) out R color[3];

        void main() {
            color[0].rgba = vec4(1.0f);
            color[1].rgba = vec4(0.5f);
            color[2].rgba = vec4(0.75f);
        }
    )glsl";
    VkShaderObj vertex_shader(m_device, vertex_source, VK_SHADER_STAGE_VERTEX_BIT, this);

    // Fragment stage consumes the matching struct-array interface; the stages must link
    // without any validation complaint about the struct members/components.
    char const *fragment_source = R"glsl(
        #version 450

        struct R {
            vec4 rgba;
        };

        layout(location = 0) in R inColor[3];

        layout (location = 0) out vec4 color;

        void main() {
            color = inColor[0].rgba * inColor[1].rgba * inColor[2].rgba;
        }
    )glsl";
    VkShaderObj fragment_shader(m_device, fragment_source, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    // Plug both stages into an otherwise default pipeline and expect a clean creation.
    const auto pipeline_config = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vertex_shader.GetStageCreateInfo(), fragment_shader.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, pipeline_config, kPerformanceWarningBit | kErrorBit, "", true);
}
1722
TEST_F(VkPositiveLayerTest, TaskAndMeshShader) {
    TEST_DESCRIPTION("Test task and mesh shader");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    AddRequiredExtensions(VK_NV_MESH_SHADER_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
        return;
    }
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Extension %s is not supported, skipping test.\n", kSkipPrefix, VK_NV_MESH_SHADER_EXTENSION_NAME);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    // Guard the fetched function pointer before calling through it, matching the
    // null-checks the sibling tests in this file perform.
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2 != nullptr);

    VkPhysicalDeviceMeshShaderFeaturesNV mesh_shader_features = LvlInitStruct<VkPhysicalDeviceMeshShaderFeaturesNV>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&mesh_shader_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &features2);
    if (!mesh_shader_features.meshShader || !mesh_shader_features.taskShader) {
        printf("%s Test requires (unsupported) meshShader and taskShader features, skipping test.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
        (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);

    // The task shader uses subgroup ballot, so the device must support subgroup
    // operations in the task stage.
    VkPhysicalDeviceVulkan11Properties vulkan11_props = LvlInitStruct<VkPhysicalDeviceVulkan11Properties>();
    auto properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&vulkan11_props);
    vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);

    if ((vulkan11_props.subgroupSupportedStages & VK_SHADER_STAGE_TASK_BIT_NV) == 0) {
        printf(
            "%s VkPhysicalDeviceVulkan11Properties::subgroupSupportedStages does not include VK_SHADER_STAGE_TASK_BIT_NV, skipping "
            "test.\n",
            kSkipPrefix);
        return;
    }

    // Task shader culls work with a subgroup ballot and passes a taskNV block to the mesh stage.
    static const char taskShaderText[] = R"glsl(
        #version 450

        #extension GL_NV_mesh_shader : require
        #extension GL_KHR_shader_subgroup_ballot : require

        #define GROUP_SIZE 32

        layout(local_size_x = 32) in;

        taskNV out Task {
            uint baseID;
            uint subIDs[GROUP_SIZE];
        } OUT;

        void main() {
            uvec4 desc = uvec4(gl_GlobalInvocationID.x);

            // implement some early culling function
            bool render = gl_GlobalInvocationID.x < 32;

            uvec4 vote  = subgroupBallot(render);
            uint tasks = subgroupBallotBitCount(vote);

            if (gl_LocalInvocationID.x == 0) {
                // write the number of surviving meshlets, i.e.
                // mesh workgroups to spawn
                gl_TaskCountNV = tasks;

                // where the meshletIDs started from for this task workgroup
                OUT.baseID = gl_WorkGroupID.x * GROUP_SIZE;
            }
        }
    )glsl";

    // Mesh shader consuming the matching taskNV input block.
    static const char meshShaderText[] = R"glsl(
        #version 450

        #extension GL_NV_mesh_shader : require

        layout(local_size_x = 1) in;
        layout(max_vertices = 3) out;
        layout(max_primitives = 1) out;
        layout(triangles) out;

        taskNV in Task {
            uint baseID;
            uint subIDs[32];
        } IN;

        void main() {
            uint meshletID = IN.baseID + IN.subIDs[gl_WorkGroupID.x];
            uvec4 desc = uvec4(meshletID);
        }
    )glsl";

    VkShaderObj ts(m_device, taskShaderText, VK_SHADER_STAGE_TASK_BIT_NV, this, "main", false, nullptr, SPV_ENV_VULKAN_1_2);
    VkShaderObj ms(m_device, meshShaderText, VK_SHADER_STAGE_MESH_BIT_NV, this, "main", false, nullptr, SPV_ENV_VULKAN_1_2);

    // Positive test: a task+mesh pipeline (no vertex stage) must create without errors.
    const auto break_vp = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {ts.GetStageCreateInfo(), ms.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, "", true);
}
1831
TEST_F(VkPositiveLayerTest, ShaderPointSizeStructMemeberWritten) {
    TEST_DESCRIPTION("Write built-in PointSize within a struct");
    m_errorMonitor->ExpectSuccess();

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Hand-written SPIR-V: gl_PerVertex (%12) is a struct whose member 1 is the
    // PointSize built-in, and it is written through OpAccessChain with extra
    // trailing component indices. Validation must only look at the FIRST access
    // chain index when deciding which output variable is written (see the banner
    // comment inside the assembly).
    const std::string vs_src = R"asm(
               OpCapability Shader
               OpMemoryModel Logical GLSL450
               OpEntryPoint Vertex %main "main" %14 %25 %47 %52
               OpSource GLSL 450
               OpMemberDecorate %12 0 BuiltIn Position
               OpMemberDecorate %12 1 BuiltIn PointSize
               OpMemberDecorate %12 2 BuiltIn ClipDistance
               OpMemberDecorate %12 3 BuiltIn CullDistance
               OpDecorate %12 Block
               OpMemberDecorate %18 0 ColMajor
               OpMemberDecorate %18 0 Offset 0
               OpMemberDecorate %18 0 MatrixStride 16
               OpMemberDecorate %18 1 Offset 64
               OpMemberDecorate %18 2 Offset 80
               OpDecorate %18 Block
               OpDecorate %25 Location 0
               OpDecorate %47 Location 1
               OpDecorate %52 Location 0
          %3 = OpTypeVoid
          %4 = OpTypeFunction %3
          %7 = OpTypeFloat 32
          %8 = OpTypeVector %7 4
          %9 = OpTypeInt 32 0
         %10 = OpConstant %9 1
         %11 = OpTypeArray %7 %10
         %12 = OpTypeStruct %8 %7 %11 %11
         %13 = OpTypePointer Output %12
         %14 = OpVariable %13 Output
         %15 = OpTypeInt 32 1
         %16 = OpConstant %15 0
         %17 = OpTypeMatrix %8 4
         %18 = OpTypeStruct %17 %7 %8
         %19 = OpTypePointer PushConstant %18
         %20 = OpVariable %19 PushConstant
         %21 = OpTypePointer PushConstant %17
         %24 = OpTypePointer Input %8
         %25 = OpVariable %24 Input
         %28 = OpTypePointer Output %8
         %30 = OpConstant %7 0.5
         %31 = OpConstant %9 2
         %32 = OpTypePointer Output %7
         %36 = OpConstant %9 3
         %46 = OpConstant %15 1
         %47 = OpVariable %24 Input
         %48 = OpTypePointer Input %7
         %52 = OpVariable %28 Output
         %53 = OpTypeVector %7 3
         %56 = OpConstant %7 1
       %main = OpFunction %3 None %4
          %6 = OpLabel

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; For the following, only the _first_ index of the access chain
; should be used for output validation, as subsequent indices refer
; to individual components within the output variable of interest.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
         %22 = OpAccessChain %21 %20 %16
         %23 = OpLoad %17 %22
         %26 = OpLoad %8 %25
         %27 = OpMatrixTimesVector %8 %23 %26
         %29 = OpAccessChain %28 %14 %16
               OpStore %29 %27
         %33 = OpAccessChain %32 %14 %16 %31
         %34 = OpLoad %7 %33
         %35 = OpFMul %7 %30 %34
         %37 = OpAccessChain %32 %14 %16 %36
         %38 = OpLoad %7 %37
         %39 = OpFMul %7 %30 %38
         %40 = OpFAdd %7 %35 %39
         %41 = OpAccessChain %32 %14 %16 %31
               OpStore %41 %40
         %42 = OpAccessChain %32 %14 %16 %10
         %43 = OpLoad %7 %42
         %44 = OpFNegate %7 %43
         %45 = OpAccessChain %32 %14 %16 %10
               OpStore %45 %44
         %49 = OpAccessChain %48 %47 %36
         %50 = OpLoad %7 %49
         %51 = OpAccessChain %32 %14 %46
               OpStore %51 %50

         %54 = OpLoad %8 %47
         %55 = OpVectorShuffle %53 %54 %54 0 1 2
         %57 = OpCompositeExtract %7 %55 0
         %58 = OpCompositeExtract %7 %55 1
         %59 = OpCompositeExtract %7 %55 2
         %60 = OpCompositeConstruct %8 %57 %58 %59 %56
               OpStore %52 %60
               OpReturn
               OpFunctionEnd
    )asm";
    auto vs = VkShaderObj::CreateFromASM(*m_device, *this, VK_SHADER_STAGE_VERTEX_BIT, vs_src, "main");

    if (vs) {
        // Push constant range sized for the %18 block: mat4 (16 floats) + vec4 (4) + float (1).
        VkPushConstantRange push_constant_ranges[1]{{VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(float) * (16 + 4 + 1)}};

        VkPipelineLayoutCreateInfo const pipeline_layout_info{
            VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, 0, 0, nullptr, 1, push_constant_ranges};

        // Two vec4 vertex attributes matching the shader's Location 0 (%25) and Location 1 (%47) inputs.
        VkVertexInputBindingDescription input_binding[2] = {
            {0, 16, VK_VERTEX_INPUT_RATE_VERTEX},
            {1, 16, VK_VERTEX_INPUT_RATE_VERTEX},
        };
        VkVertexInputAttributeDescription input_attribs[2] = {
            {0, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0},
            {1, 1, VK_FORMAT_R32G32B32A32_SFLOAT, 0},
        };

        CreatePipelineHelper pipe(*this);
        pipe.InitInfo();
        pipe.shader_stages_ = {vs->GetStageCreateInfo(), pipe.fs_->GetStageCreateInfo()};
        pipe.pipeline_layout_ci_ = pipeline_layout_info;
        // Point topology so the PointSize write is actually consumed.
        pipe.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        pipe.vi_ci_.pVertexBindingDescriptions = input_binding;
        pipe.vi_ci_.vertexBindingDescriptionCount = 2;
        pipe.vi_ci_.pVertexAttributeDescriptions = input_attribs;
        pipe.vi_ci_.vertexAttributeDescriptionCount = 2;
        pipe.InitState();
        pipe.CreateGraphicsPipeline();
    } else {
        // Assembling the SPIR-V failed (e.g. tool support missing) — treat as a skip, not a failure.
        printf("%s Error creating shader from assembly\n", kSkipPrefix);
    }
    m_errorMonitor->VerifyNotFound();
}
1964
TEST_F(VkPositiveLayerTest, Std430SpirvOptFlags10) {
    TEST_DESCRIPTION("Reproduces issue 3442 where spirv-opt fails to set layout flags options using Vulkan 1.0");
    // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/3442

    AddRequiredExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME);
    AddRequiredExtensions(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME);

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    if (!AreRequestedExtensionsEnabled()) {
        printf("%s test required extensions not available. Skipping.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    // Guard the fetched function pointer, as the other tests in this file do.
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2 != nullptr);

    auto uniform_buffer_standard_layout_features = LvlInitStruct<VkPhysicalDeviceUniformBufferStandardLayoutFeatures>();
    auto scalar_block_layout_features =
        LvlInitStruct<VkPhysicalDeviceScalarBlockLayoutFeatures>(&uniform_buffer_standard_layout_features);
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&scalar_block_layout_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &features2);

    if (scalar_block_layout_features.scalarBlockLayout == VK_FALSE ||
        uniform_buffer_standard_layout_features.uniformBufferStandardLayout == VK_FALSE) {
        printf("%s scalarBlockLayout and uniformBufferStandardLayout are not supported Skipping.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);

    // std430 uniform block is only legal with scalar/standard layout features enabled.
    const char *fragment_source = R"glsl(
        #version 450
        #extension GL_ARB_separate_shader_objects:enable
        #extension GL_EXT_samplerless_texture_functions:require
        #extension GL_EXT_nonuniform_qualifier : require
        #extension GL_EXT_scalar_block_layout : require

        layout(std430, set=0,binding=0)uniform UniformBufferObject{
            mat4 view;
            mat4 proj;
            vec4 lightPositions[1];
            int SliceCutoffs[6];
        }ubo;

        // this specialization constant triggers the validation layer to recompile the shader
        // which causes the error related to the above uniform
        layout(constant_id = 0) const float spec = 10.0f;

        layout(location=0) out vec4 frag_color;
        void main() {
            frag_color = vec4(ubo.lightPositions[0]) * spec;
        }
    )glsl";

    // Force a random value to replace the default to trigger shader val logic to replace it
    float data = 2.0f;
    VkSpecializationMapEntry entry = {0, 0, sizeof(float)};
    VkSpecializationInfo specialization_info = {1, &entry, sizeof(float), &data};
    const VkShaderObj fs(m_device, fragment_source, VK_SHADER_STAGE_FRAGMENT_BIT, this, "main", false, &specialization_info,
                         SPV_ENV_VULKAN_1_0);

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}};
    pipe.InitState();
    pipe.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    // Pair ExpectSuccess() with the VerifyNotFound() below; the original called
    // VerifyNotFound() without ever arming the monitor (compare Std430SpirvOptFlags12).
    m_errorMonitor->ExpectSuccess();
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
2039
TEST_F(VkPositiveLayerTest, Std430SpirvOptFlags12) {
    TEST_DESCRIPTION("Reproduces issue 3442 where spirv-opt fails to set layout flags options using Vulkan 1.2");
    // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/3442

    m_errorMonitor->ExpectSuccess();
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Tests requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }

    // On 1.2 the layout features are core, so query them through VkPhysicalDeviceVulkan12Features.
    auto features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features12);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);

    if (features12.scalarBlockLayout == VK_FALSE || features12.uniformBufferStandardLayout == VK_FALSE) {
        printf("%s scalarBlockLayout and uniformBufferStandardLayout are not supported Skipping.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);

    // std430 uniform block plus a specialization constant: the constant forces the
    // validation layer to run spirv-opt on the shader, which must keep the layout flags.
    const char *fs_source = R"glsl(
        #version 450
        #extension GL_ARB_separate_shader_objects:enable
        #extension GL_EXT_samplerless_texture_functions:require
        #extension GL_EXT_nonuniform_qualifier : require
        #extension GL_EXT_scalar_block_layout : require

        layout(std430, set=0,binding=0)uniform UniformBufferObject{
            mat4 view;
            mat4 proj;
            vec4 lightPositions[1];
            int SliceCutoffs[6];
        }ubo;

        // this specialization constant triggers the validation layer to recompile the shader
        // which causes the error related to the above uniform
        layout(constant_id = 0) const float spec = 10.0f;

        layout(location=0) out vec4 frag_color;
        void main() {
            frag_color = vec4(ubo.lightPositions[0]) * spec;
        }
    )glsl";

    // Override the default specialization value so the layer's shader re-specialization runs.
    float spec_data = 2.0f;
    VkSpecializationMapEntry map_entry{};
    map_entry.constantID = 0;
    map_entry.offset = 0;
    map_entry.size = sizeof(float);
    VkSpecializationInfo spec_info{};
    spec_info.mapEntryCount = 1;
    spec_info.pMapEntries = &map_entry;
    spec_info.dataSize = sizeof(float);
    spec_info.pData = &spec_data;
    const VkShaderObj fs(m_device, fs_source, VK_SHADER_STAGE_FRAGMENT_BIT, this, "main", false, &spec_info,
                         SPV_ENV_VULKAN_1_0);

    // Build the pipeline with the UBO binding and expect no validation errors.
    CreatePipelineHelper pipeline(*this);
    pipeline.InitInfo();
    pipeline.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}};
    pipeline.InitState();
    pipeline.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    pipeline.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();
}
2106
TEST_F(VkPositiveLayerTest, WriteDescriptorSetAccelerationStructureNVNullDescriptor) {
    TEST_DESCRIPTION("Validate using NV acceleration structure descriptor writing with null descriptor.");

    AddRequiredExtensions(VK_NV_RAY_TRACING_EXTENSION_NAME);
    AddRequiredExtensions(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // Skip (rather than fail later in InitState) when the required extensions are
    // unavailable — matches the AddRequiredExtensions pattern used by the other tests.
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Required extensions are not supported, skipping test.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    auto robustness2_features = LvlInitStruct<VkPhysicalDeviceRobustness2FeaturesEXT>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&robustness2_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    // Writing VK_NULL_HANDLE acceleration structures is only legal with nullDescriptor.
    if (robustness2_features.nullDescriptor != VK_TRUE) {
        printf("%s nullDescriptor feature not supported, skipping test.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    m_errorMonitor->ExpectSuccess();

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, 1, VK_SHADER_STAGE_MISS_BIT_NV, nullptr},
                                     });

    VkAccelerationStructureNV top_level_as = VK_NULL_HANDLE;

    // Use LvlInitStruct like the rest of this file instead of manual sType/zero-init.
    auto acc = LvlInitStruct<VkWriteDescriptorSetAccelerationStructureNV>();
    acc.accelerationStructureCount = 1;
    acc.pAccelerationStructures = &top_level_as;

    auto descriptor_write = LvlInitStruct<VkWriteDescriptorSet>(&acc);
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV;

    // The null handle must be accepted without any validation error.
    vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, nullptr);

    m_errorMonitor->VerifyNotFound();
}
2153