1 /*
2 * Copyright (c) 2020 The Khronos Group Inc.
3 * Copyright (c) 2020 Valve Corporation
4 * Copyright (c) 2020 LunarG, Inc.
5 * Copyright (c) 2020 Google, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Author: Mark Lobodzinski <mark@lunarg.com>
14 * Author: Tony Barbour <tony@LunarG.com>
15 */
16
17 #include "layer_validation_tests.h"
18
InitGpuAssistedFramework(bool request_descriptor_indexing)19 bool VkGpuAssistedLayerTest::InitGpuAssistedFramework(bool request_descriptor_indexing) {
20 VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT};
21 VkValidationFeatureDisableEXT disables[] = {
22 VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT, VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT,
23 VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT, VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT};
24 VkValidationFeaturesEXT features = {};
25 features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
26 features.enabledValidationFeatureCount = 1;
27 features.disabledValidationFeatureCount = 4;
28 features.pEnabledValidationFeatures = enables;
29 features.pDisabledValidationFeatures = disables;
30
31 if (request_descriptor_indexing) {
32 return CheckDescriptorIndexingSupportAndInitFramework(this, m_instance_extension_names, m_device_extension_names, &features,
33 m_errorMonitor);
34 }
35
36 InitFramework(m_errorMonitor, &features);
37 return false;
38 }
39
TEST_F(VkGpuAssistedLayerTest, GpuValidationArrayOOBGraphicsShaders) {
    TEST_DESCRIPTION(
        "GPU validation: Verify detection of out-of-bounds descriptor array indexing and use of uninitialized descriptors.");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    // True only if the framework reports descriptor-indexing support; gates the extra test cases below.
    bool descriptor_indexing = InitGpuAssistedFramework(true);

    if (IsPlatform(kGalaxyS10)) {
        printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
        return;
    }

    if (IsPlatform(kNexusPlayer)) {
        printf("%s This test should not run on Nexus Player\n", kSkipPrefix);
        return;
    }

    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        printf("%s GPU-Assisted validation test requires a driver that can draw.\n", kSkipPrefix);
        return;
    }

    // Query the individual descriptor-indexing feature bits; if any required one is missing,
    // fall back to running only the non-indexing subset of the test.
    VkPhysicalDeviceFeatures2KHR features2 = {};
    auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
    if (descriptor_indexing) {
        PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
            (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
        ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

        features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features);
        vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

        if (!indexing_features.runtimeDescriptorArray || !indexing_features.descriptorBindingSampledImageUpdateAfterBind ||
            !indexing_features.descriptorBindingPartiallyBound || !indexing_features.descriptorBindingVariableDescriptorCount ||
            !indexing_features.shaderSampledImageArrayNonUniformIndexing ||
            !indexing_features.shaderStorageBufferArrayNonUniformIndexing) {
            printf("Not all descriptor indexing features supported, skipping descriptor indexing tests\n");
            descriptor_indexing = false;
        }
    }

    VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    // NOTE(review): if InitGpuAssistedFramework() returned false, features2.sType is still 0 here;
    // presumably InitState tolerates a zeroed features2 — confirm against the framework.
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, pool_flags));
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s GPU-Assisted validation test requires Vulkan 1.1+.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Make a uniform buffer to be passed to the shader that contains the invalid array index.
    uint32_t qfi = 0;
    VkBufferCreateInfo bci = {};
    bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    bci.size = 1024;
    bci.queueFamilyIndexCount = 1;
    bci.pQueueFamilyIndices = &qfi;
    VkBufferObj buffer0;
    VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    buffer0.init(*m_device, bci, mem_props);

    bci.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
    // Make another buffer to populate the buffer array to be indexed
    VkBufferObj buffer1;
    buffer1.init(*m_device, bci, mem_props);

    void *layout_pnext = nullptr;
    void *allocate_pnext = nullptr;
    auto pool_create_flags = 0;
    auto layout_create_flags = 0;
    VkDescriptorBindingFlagsEXT ds_binding_flags[2] = {};
    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT layout_createinfo_binding_flags[1] = {};
    if (descriptor_indexing) {
        // Binding 1 (the image array) is partially bound and updatable after bind so that
        // leaving one array element unwritten is legal at the API level.
        ds_binding_flags[0] = 0;
        ds_binding_flags[1] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT;

        layout_createinfo_binding_flags[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT;
        layout_createinfo_binding_flags[0].pNext = NULL;
        layout_createinfo_binding_flags[0].bindingCount = 2;
        layout_createinfo_binding_flags[0].pBindingFlags = ds_binding_flags;
        layout_create_flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
        // Fix: use the descriptor-pool flag here. The original passed
        // VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT (a set-layout flag),
        // which only worked because both enums happen to share the value 0x2.
        pool_create_flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT;
        layout_pnext = layout_createinfo_binding_flags;
    }

    // Prepare descriptors
    OneOffDescriptorSet descriptor_set(m_device,
                                       {
                                           {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                           {1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 6, VK_SHADER_STAGE_ALL, nullptr},
                                       },
                                       layout_create_flags, layout_pnext, pool_create_flags);

    // Second set: variable-count allocation — layout declares 8 samplers but only 6 are allocated.
    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT variable_count = {};
    uint32_t desc_counts;
    if (descriptor_indexing) {
        layout_create_flags = 0;
        pool_create_flags = 0;
        ds_binding_flags[1] =
            VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT;
        desc_counts = 6;  // We'll reserve 8 spaces in the layout, but the descriptor will only use 6
        variable_count.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT;
        variable_count.descriptorSetCount = 1;
        variable_count.pDescriptorCounts = &desc_counts;
        allocate_pnext = &variable_count;
    }

    OneOffDescriptorSet descriptor_set_variable(m_device,
                                                {
                                                    {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                    {1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 8, VK_SHADER_STAGE_ALL, nullptr},
                                                },
                                                layout_create_flags, layout_pnext, pool_create_flags, allocate_pnext);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});
    const VkPipelineLayoutObj pipeline_layout_variable(m_device, {&descriptor_set_variable.layout_});
    VkTextureObj texture(m_device, nullptr);
    VkSamplerObj sampler(m_device);

    VkDescriptorBufferInfo buffer_info[1] = {};
    buffer_info[0].buffer = buffer0.handle();
    buffer_info[0].offset = 0;
    buffer_info[0].range = sizeof(uint32_t);

    VkDescriptorImageInfo image_info[6] = {};
    for (int i = 0; i < 6; i++) {
        image_info[i] = texture.DescriptorImageInfo();
        image_info[i].sampler = sampler.handle();
        image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    }

    VkWriteDescriptorSet descriptor_writes[2] = {};
    descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_writes[0].dstSet = descriptor_set.set_;  // descriptor_set;
    descriptor_writes[0].dstBinding = 0;
    descriptor_writes[0].descriptorCount = 1;
    descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_writes[0].pBufferInfo = buffer_info;
    descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_writes[1].dstSet = descriptor_set.set_;  // descriptor_set;
    descriptor_writes[1].dstBinding = 1;
    if (descriptor_indexing)
        descriptor_writes[1].descriptorCount = 5;  // Intentionally don't write index 5
    else
        descriptor_writes[1].descriptorCount = 6;
    descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    descriptor_writes[1].pImageInfo = image_info;
    vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL);
    if (descriptor_indexing) {
        // Re-issue the same writes against the variable-count set.
        descriptor_writes[0].dstSet = descriptor_set_variable.set_;
        descriptor_writes[1].dstSet = descriptor_set_variable.set_;
        vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL);
    }

    // layout_pnext still points at layout_createinfo_binding_flags (when indexing is enabled),
    // so these flags feed into the buffer descriptor set's layout created below.
    ds_binding_flags[0] = 0;
    ds_binding_flags[1] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT;

    // Resources for buffer tests
    OneOffDescriptorSet descriptor_set_buffer(m_device,
                                              {
                                                  {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                  {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 6, VK_SHADER_STAGE_ALL, nullptr},
                                              },
                                              0, layout_pnext, 0);

    const VkPipelineLayoutObj pipeline_layout_buffer(m_device, {&descriptor_set_buffer.layout_});

    VkDescriptorBufferInfo buffer_test_buffer_info[7] = {};
    buffer_test_buffer_info[0].buffer = buffer0.handle();
    buffer_test_buffer_info[0].offset = 0;
    buffer_test_buffer_info[0].range = sizeof(uint32_t);

    for (int i = 1; i < 7; i++) {
        buffer_test_buffer_info[i].buffer = buffer1.handle();
        buffer_test_buffer_info[i].offset = 0;
        buffer_test_buffer_info[i].range = 4 * sizeof(float);
    }

    if (descriptor_indexing) {
        VkWriteDescriptorSet buffer_descriptor_writes[2] = {};
        buffer_descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        buffer_descriptor_writes[0].dstSet = descriptor_set_buffer.set_;  // descriptor_set;
        buffer_descriptor_writes[0].dstBinding = 0;
        buffer_descriptor_writes[0].descriptorCount = 1;
        buffer_descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        buffer_descriptor_writes[0].pBufferInfo = buffer_test_buffer_info;
        buffer_descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        buffer_descriptor_writes[1].dstSet = descriptor_set_buffer.set_;  // descriptor_set;
        buffer_descriptor_writes[1].dstBinding = 1;
        buffer_descriptor_writes[1].descriptorCount = 5;  // Intentionally don't write index 5
        buffer_descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
        buffer_descriptor_writes[1].pBufferInfo = &buffer_test_buffer_info[1];
        vk::UpdateDescriptorSets(m_device->device(), 2, buffer_descriptor_writes, 0, NULL);
    }

    // Shader programs for array OOB test in vertex stage:
    // - The vertex shader fetches the invalid index from the uniform buffer and uses it to make an invalid index into another
    // array.
    char const *vsSource_vert =
        "#version 450\n"
        "\n"
        "layout(std140, set = 0, binding = 0) uniform foo { uint tex_index[1]; } uniform_index_buffer;\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n"
        "vec2 vertices[3];\n"
        "void main(){\n"
        "      vertices[0] = vec2(-1.0, -1.0);\n"
        "      vertices[1] = vec2( 1.0, -1.0);\n"
        "      vertices[2] = vec2( 0.0,  1.0);\n"
        "   gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
        "   gl_Position += 1e-30 * texture(tex[uniform_index_buffer.tex_index[0]], vec2(0, 0));\n"
        "}\n";
    char const *fsSource_vert =
        "#version 450\n"
        "\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n"
        "layout(location = 0) out vec4 uFragColor;\n"
        "void main(){\n"
        "   uFragColor = texture(tex[0], vec2(0, 0));\n"
        "}\n";

    // Shader programs for array OOB test in fragment stage:
    // - The vertex shader fetches the invalid index from the uniform buffer and passes it to the fragment shader.
    // - The fragment shader makes the invalid array access.
    char const *vsSource_frag =
        "#version 450\n"
        "\n"
        "layout(std140, binding = 0) uniform foo { uint tex_index[1]; } uniform_index_buffer;\n"
        "layout(location = 0) out flat uint index;\n"
        "vec2 vertices[3];\n"
        "void main(){\n"
        "      vertices[0] = vec2(-1.0, -1.0);\n"
        "      vertices[1] = vec2( 1.0, -1.0);\n"
        "      vertices[2] = vec2( 0.0,  1.0);\n"
        "   gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
        "   index = uniform_index_buffer.tex_index[0];\n"
        "}\n";
    char const *fsSource_frag =
        "#version 450\n"
        "\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[6];\n"
        "layout(location = 0) out vec4 uFragColor;\n"
        "layout(location = 0) in flat uint index;\n"
        "void main(){\n"
        "   uFragColor = texture(tex[index], vec2(0, 0));\n"
        "}\n";
    char const *fsSource_frag_runtime =
        "#version 450\n"
        "#extension GL_EXT_nonuniform_qualifier : enable\n"
        "\n"
        "layout(set = 0, binding = 1) uniform sampler2D tex[];\n"
        "layout(location = 0) out vec4 uFragColor;\n"
        "layout(location = 0) in flat uint index;\n"
        "void main(){\n"
        "   uFragColor = texture(tex[index], vec2(0, 0));\n"
        "}\n";
    char const *fsSource_buffer =
        "#version 450\n"
        "#extension GL_EXT_nonuniform_qualifier : enable\n "
        "\n"
        "layout(set = 0, binding = 1) buffer foo { vec4 val; } colors[];\n"
        "layout(location = 0) out vec4 uFragColor;\n"
        "layout(location = 0) in flat uint index;\n"
        "void main(){\n"
        "   uFragColor = colors[index].val;\n"
        "}\n";
    char const *gsSource =
        "#version 450\n"
        "#extension GL_EXT_nonuniform_qualifier : enable\n "
        "layout(triangles) in;\n"
        "layout(triangle_strip, max_vertices=3) out;\n"
        "layout(location=0) in VertexData { vec4 x; } gs_in[];\n"
        "layout(std140, set = 0, binding = 0) uniform ufoo { uint index; } uniform_index_buffer;\n"
        "layout(set = 0, binding = 1) buffer bfoo { vec4 val; } adds[];\n"
        "void main() {\n"
        "   gl_Position = gs_in[0].x + adds[uniform_index_buffer.index].val.x;\n"
        "   EmitVertex();\n"
        "}\n";
    static const char *tesSource =
        "#version 450\n"
        "#extension GL_EXT_nonuniform_qualifier : enable\n "
        "layout(std140, set = 0, binding = 0) uniform ufoo { uint index; } uniform_index_buffer;\n"
        "layout(set = 0, binding = 1) buffer bfoo { vec4 val; } adds[];\n"
        "layout(triangles, equal_spacing, cw) in;\n"
        "void main() {\n"
        "    gl_Position = adds[uniform_index_buffer.index].val;\n"
        "}\n";

    // One table entry per scenario: the shaders for each stage, whether the shaders are built
    // with debug info, the layout/set to bind, the index to feed the shader through buffer0,
    // and the validation-error substring GPU-AV is expected to emit.
    struct TestCase {
        char const *vertex_source;
        char const *fragment_source;
        char const *geometry_source;
        char const *tess_ctrl_source;
        char const *tess_eval_source;
        bool debug;
        const VkPipelineLayoutObj *pipeline_layout;
        const OneOffDescriptorSet *descriptor_set;
        uint32_t index;
        char const *expected_error;
    };

    std::vector<TestCase> tests;
    tests.push_back({vsSource_vert, fsSource_vert, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set, 25,
                     "Index of 25 used to index descriptor array of length 6."});
    tests.push_back({vsSource_frag, fsSource_frag, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set, 25,
                     "Index of 25 used to index descriptor array of length 6."});
#if !defined(ANDROID)
    // The Android test framework uses shaderc for online compilations.  Even when configured to
    // compile with debug info, shaderc seems to drop the OpLine instructions from the shader binary.
    // This causes the following two tests to fail on Android platforms.  Skip these tests until
    // the shaderc issue is understood/resolved.
    tests.push_back({vsSource_vert, fsSource_vert, nullptr, nullptr, nullptr, true, &pipeline_layout, &descriptor_set, 25,
                     "gl_Position += 1e-30 * texture(tex[uniform_index_buffer.tex_index[0]], vec2(0, 0));"});
    tests.push_back({vsSource_frag, fsSource_frag, nullptr, nullptr, nullptr, true, &pipeline_layout, &descriptor_set, 25,
                     "uFragColor = texture(tex[index], vec2(0, 0));"});
#endif
    if (descriptor_indexing) {
        tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set,
                         25, "Index of 25 used to index descriptor array of length 6."});
        tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout, &descriptor_set,
                         5, "Descriptor index 5 is uninitialized"});
        // Pick 6 below because it is less than the maximum specified, but more than the actual specified
        tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout_variable,
                         &descriptor_set_variable, 6, "Index of 6 used to index descriptor array of length 6."});
        tests.push_back({vsSource_frag, fsSource_frag_runtime, nullptr, nullptr, nullptr, false, &pipeline_layout_variable,
                         &descriptor_set_variable, 5, "Descriptor index 5 is uninitialized"});
        tests.push_back({vsSource_frag, fsSource_buffer, nullptr, nullptr, nullptr, false, &pipeline_layout_buffer,
                         &descriptor_set_buffer, 25, "Index of 25 used to index descriptor array of length 6."});
        tests.push_back({vsSource_frag, fsSource_buffer, nullptr, nullptr, nullptr, false, &pipeline_layout_buffer,
                         &descriptor_set_buffer, 5, "Descriptor index 5 is uninitialized"});
        if (m_device->phy().features().geometryShader) {
            // OOB Geometry
            tests.push_back({bindStateVertShaderText, bindStateFragShaderText, gsSource, nullptr, nullptr, false,
                             &pipeline_layout_buffer, &descriptor_set_buffer, 25, "Stage = Geometry"});
            // Uninitialized Geometry
            tests.push_back({bindStateVertShaderText, bindStateFragShaderText, gsSource, nullptr, nullptr, false,
                             &pipeline_layout_buffer, &descriptor_set_buffer, 5, "Stage = Geometry"});
        }
        if (m_device->phy().features().tessellationShader) {
            tests.push_back({bindStateVertShaderText, bindStateFragShaderText, nullptr, bindStateTscShaderText, tesSource, false,
                             &pipeline_layout_buffer, &descriptor_set_buffer, 25, "Stage = Tessellation Eval"});
            tests.push_back({bindStateVertShaderText, bindStateFragShaderText, nullptr, bindStateTscShaderText, tesSource, false,
                             &pipeline_layout_buffer, &descriptor_set_buffer, 5, "Stage = Tessellation Eval"});
        }
    }

    VkViewport viewport = m_viewports[0];
    VkRect2D scissors = m_scissors[0];

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();

    // Build a pipeline per test case, record one draw, write the bad index into buffer0,
    // submit, and verify GPU-AV produced the expected error.
    for (const auto &iter : tests) {
        VkResult err;
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, iter.expected_error);
        VkShaderObj vs(m_device, iter.vertex_source, VK_SHADER_STAGE_VERTEX_BIT, this, "main", iter.debug);
        VkShaderObj fs(m_device, iter.fragment_source, VK_SHADER_STAGE_FRAGMENT_BIT, this, "main", iter.debug);
        VkShaderObj *gs = nullptr;
        VkShaderObj *tcs = nullptr;
        VkShaderObj *tes = nullptr;
        VkPipelineObj pipe(m_device);
        pipe.AddShader(&vs);
        pipe.AddShader(&fs);
        if (iter.geometry_source) {
            gs = new VkShaderObj(m_device, iter.geometry_source, VK_SHADER_STAGE_GEOMETRY_BIT, this, "main", iter.debug);
            pipe.AddShader(gs);
        }
        if (iter.tess_ctrl_source && iter.tess_eval_source) {
            tcs = new VkShaderObj(m_device, iter.tess_ctrl_source, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this, "main",
                                  iter.debug);
            tes = new VkShaderObj(m_device, iter.tess_eval_source, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this, "main",
                                  iter.debug);
            pipe.AddShader(tcs);
            pipe.AddShader(tes);
            VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
                                                         VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
            VkPipelineTessellationDomainOriginStateCreateInfo tessellationDomainOriginStateInfo = {
                VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, VK_NULL_HANDLE,
                VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT};

            VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
                                                       &tessellationDomainOriginStateInfo, 0, 3};
            pipe.SetTessellation(&tsci);
            pipe.SetInputAssembly(&iasci);
        }
        pipe.AddDefaultColorAttachment();
        err = pipe.CreateVKPipeline(iter.pipeline_layout->handle(), renderPass());
        ASSERT_VK_SUCCESS(err);
        m_commandBuffer->begin();
        m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, iter.pipeline_layout->handle(), 0, 1,
                                  &iter.descriptor_set->set_, 0, nullptr);
        vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
        vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors);
        vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
        vk::CmdEndRenderPass(m_commandBuffer->handle());
        m_commandBuffer->end();
        // Write the out-of-range (or uninitialized-slot) index for the shader to read.
        uint32_t *data = (uint32_t *)buffer0.memory().map();
        data[0] = iter.index;
        buffer0.memory().unmap();

        vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
        vk::QueueWaitIdle(m_device->m_queue);
        m_errorMonitor->VerifyFound();
        if (gs) {
            delete gs;
        }
        if (tcs && tes) {
            delete tcs;
            delete tes;
        }
    }

    // Repeat the OOB/uninitialized checks on a compute pipeline, if a compute queue exists.
    auto c_queue = m_device->GetDefaultComputeQueue();
    if (c_queue && descriptor_indexing) {
        char const *csSource =
            "#version 450\n"
            "#extension GL_EXT_nonuniform_qualifier : enable\n "
            "layout(set = 0, binding = 0) uniform ufoo { uint index; } u_index;"
            "layout(set = 0, binding = 1) buffer StorageBuffer {\n"
            "    uint data;\n"
            "} Data[];\n"
            "void main() {\n"
            "   Data[(u_index.index - 1)].data = Data[u_index.index].data;\n"
            "}\n";

        // Fix: construct the shader wrapper on the stack so its destructor releases the
        // VkShaderModule.  The original heap-allocated it with new and never deleted it
        // (leaking the wrapper) while destroying the raw handle by hand.
        VkShaderObj shader_module(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);

        VkPipelineShaderStageCreateInfo stage;
        stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
        stage.pNext = nullptr;
        stage.flags = 0;
        stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
        stage.module = shader_module.handle();
        stage.pName = "main";
        stage.pSpecializationInfo = nullptr;

        // CreateComputePipelines
        VkComputePipelineCreateInfo pipeline_info = {};
        pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
        pipeline_info.pNext = nullptr;
        pipeline_info.flags = 0;
        pipeline_info.layout = pipeline_layout_buffer.handle();
        pipeline_info.basePipelineHandle = VK_NULL_HANDLE;
        pipeline_info.basePipelineIndex = -1;
        pipeline_info.stage = stage;

        VkPipeline c_pipeline;
        // Fix: the original ignored this result and would bind an uninitialized handle on failure.
        VkResult err = vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &c_pipeline);
        ASSERT_VK_SUCCESS(err);

        VkCommandBufferBeginInfo begin_info = {};
        VkCommandBufferInheritanceInfo hinfo = {};
        hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
        begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        begin_info.pInheritanceInfo = &hinfo;

        m_commandBuffer->begin(&begin_info);
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, c_pipeline);
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout_buffer.handle(), 0, 1,
                                  &descriptor_set_buffer.set_, 0, nullptr);
        vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
        m_commandBuffer->end();

        // Uninitialized
        uint32_t *data = (uint32_t *)buffer0.memory().map();
        data[0] = 5;
        buffer0.memory().unmap();
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Stage = Compute");
        vk::QueueSubmit(c_queue->handle(), 1, &submit_info, VK_NULL_HANDLE);
        // Fix: wait on the queue we actually submitted to (the original waited on the default
        // graphics queue, which races with the compute submission).
        vk::QueueWaitIdle(c_queue->handle());
        m_errorMonitor->VerifyFound();
        // Out of Bounds
        data = (uint32_t *)buffer0.memory().map();
        data[0] = 25;
        buffer0.memory().unmap();
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Stage = Compute");
        vk::QueueSubmit(c_queue->handle(), 1, &submit_info, VK_NULL_HANDLE);
        vk::QueueWaitIdle(c_queue->handle());
        m_errorMonitor->VerifyFound();
        vk::DestroyPipeline(m_device->handle(), c_pipeline, NULL);
        // shader_module's destructor destroys the VkShaderModule here.
    }
}
524
TEST_F(VkGpuAssistedLayerTest,GpuBufferDeviceAddressOOB)525 TEST_F(VkGpuAssistedLayerTest, GpuBufferDeviceAddressOOB) {
526 SetTargetApiVersion(VK_API_VERSION_1_1);
527 bool supported = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
528 m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
529
530 InitGpuAssistedFramework(false);
531 if (IsPlatform(kMockICD) || DeviceSimulation()) {
532 printf("%s GPU-Assisted validation test requires a driver that can draw.\n", kSkipPrefix);
533 return;
534 }
535
536 supported = supported && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
537 m_device_extension_names.push_back(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
538
539 VkPhysicalDeviceFeatures2KHR features2 = {};
540 auto bda_features = lvl_init_struct<VkPhysicalDeviceBufferDeviceAddressFeaturesKHR>();
541 PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
542 (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
543 ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
544
545 features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&bda_features);
546 vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
547 supported = supported && bda_features.bufferDeviceAddress;
548
549 if (!supported) {
550 printf("%s Buffer Device Address feature not supported, skipping test\n", kSkipPrefix);
551 return;
552 }
553
554 bool mesh_shader_supported = DeviceExtensionSupported(gpu(), nullptr, VK_NV_MESH_SHADER_EXTENSION_NAME);
555 if (mesh_shader_supported) {
556 m_device_extension_names.push_back(VK_NV_MESH_SHADER_EXTENSION_NAME);
557 }
558
559 VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
560 ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, pool_flags));
561 if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
562 printf("%s GPU-Assisted validation test requires Vulkan 1.1+.\n", kSkipPrefix);
563 return;
564 }
565 ASSERT_NO_FATAL_FAILURE(InitViewport());
566 ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
567
568 // Make a uniform buffer to be passed to the shader that contains the pointer and write count
569 uint32_t qfi = 0;
570 VkBufferCreateInfo bci = {};
571 bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
572 bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
573 bci.size = 8;
574 bci.queueFamilyIndexCount = 1;
575 bci.pQueueFamilyIndices = &qfi;
576 VkBufferObj buffer0;
577 VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
578 buffer0.init(*m_device, bci, mem_props);
579
580 // Make another buffer to write to
581 bci.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR;
582 bci.size = 64; // Buffer should be 16*4 = 64 bytes
583 VkBuffer buffer1;
584 vk::CreateBuffer(device(), &bci, NULL, &buffer1);
585 VkMemoryRequirements buffer_mem_reqs = {};
586 vk::GetBufferMemoryRequirements(device(), buffer1, &buffer_mem_reqs);
587 VkMemoryAllocateInfo buffer_alloc_info = {};
588 buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
589 buffer_alloc_info.allocationSize = buffer_mem_reqs.size;
590 m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0);
591 VkMemoryAllocateFlagsInfo alloc_flags = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO};
592 alloc_flags.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
593 buffer_alloc_info.pNext = &alloc_flags;
594 VkDeviceMemory buffer_mem;
595 VkResult err = vk::AllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem);
596 ASSERT_VK_SUCCESS(err);
597 vk::BindBufferMemory(m_device->device(), buffer1, buffer_mem, 0);
598
599 // Get device address of buffer to write to
600 VkBufferDeviceAddressInfoKHR bda_info = {};
601 bda_info.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR;
602 bda_info.buffer = buffer1;
603 auto vkGetBufferDeviceAddressKHR =
604 (PFN_vkGetBufferDeviceAddressKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetBufferDeviceAddressKHR");
605 ASSERT_TRUE(vkGetBufferDeviceAddressKHR != nullptr);
606 auto pBuffer = vkGetBufferDeviceAddressKHR(m_device->device(), &bda_info);
607
608 OneOffDescriptorSet descriptor_set(m_device, {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}});
609
610 const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});
611 VkDescriptorBufferInfo buffer_test_buffer_info[2] = {};
612 buffer_test_buffer_info[0].buffer = buffer0.handle();
613 buffer_test_buffer_info[0].offset = 0;
614 buffer_test_buffer_info[0].range = sizeof(uint32_t);
615
616 VkWriteDescriptorSet descriptor_writes[1] = {};
617 descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
618 descriptor_writes[0].dstSet = descriptor_set.set_;
619 descriptor_writes[0].dstBinding = 0;
620 descriptor_writes[0].descriptorCount = 1;
621 descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
622 descriptor_writes[0].pBufferInfo = buffer_test_buffer_info;
623 vk::UpdateDescriptorSets(m_device->device(), 1, descriptor_writes, 0, NULL);
624
625 char const *shader_source =
626 "#version 450\n"
627 "#extension GL_EXT_buffer_reference : enable\n "
628 "layout(buffer_reference, buffer_reference_align = 16) buffer bufStruct;\n"
629 "layout(set = 0, binding = 0) uniform ufoo {\n"
630 " bufStruct data;\n"
631 " int nWrites;\n"
632 "} u_info;\n"
633 "layout(buffer_reference, std140) buffer bufStruct {\n"
634 " int a[4];\n"
635 "};\n"
636 "void main() {\n"
637 " for (int i=0; i < u_info.nWrites; ++i) {\n"
638 " u_info.data.a[i] = 0xdeadca71;\n"
639 " }\n"
640 "}\n";
641 VkShaderObj vs(m_device, shader_source, VK_SHADER_STAGE_VERTEX_BIT, this, "main", true);
642
643 VkViewport viewport = m_viewports[0];
644 VkRect2D scissors = m_scissors[0];
645
646 VkSubmitInfo submit_info = {};
647 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
648 submit_info.commandBufferCount = 1;
649 submit_info.pCommandBuffers = &m_commandBuffer->handle();
650
651 VkPipelineObj pipe(m_device);
652 pipe.AddShader(&vs);
653 pipe.AddDefaultColorAttachment();
654 err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
655 ASSERT_VK_SUCCESS(err);
656
657 VkCommandBufferBeginInfo begin_info = {};
658 VkCommandBufferInheritanceInfo hinfo = {};
659 hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
660 begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
661 begin_info.pInheritanceInfo = &hinfo;
662
663 m_commandBuffer->begin(&begin_info);
664 m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
665 vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
666 vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
667 &descriptor_set.set_, 0, nullptr);
668 vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
669 vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors);
670 vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
671 vk::CmdEndRenderPass(m_commandBuffer->handle());
672 m_commandBuffer->end();
673
674 // Starting address too low
675 VkDeviceAddress *data = (VkDeviceAddress *)buffer0.memory().map();
676 data[0] = pBuffer - 16;
677 data[1] = 4;
678 buffer0.memory().unmap();
679 m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "access out of bounds");
680 err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
681 ASSERT_VK_SUCCESS(err);
682 err = vk::QueueWaitIdle(m_device->m_queue);
683 ASSERT_VK_SUCCESS(err);
684 m_errorMonitor->VerifyFound();
685
686 // Run past the end
687 data = (VkDeviceAddress *)buffer0.memory().map();
688 data[0] = pBuffer;
689 data[1] = 5;
690 buffer0.memory().unmap();
691 m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "access out of bounds");
692 err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
693 ASSERT_VK_SUCCESS(err);
694 err = vk::QueueWaitIdle(m_device->m_queue);
695 ASSERT_VK_SUCCESS(err);
696 m_errorMonitor->VerifyFound();
697
698 // Positive test - stay inside buffer
699 m_errorMonitor->ExpectSuccess();
700 data = (VkDeviceAddress *)buffer0.memory().map();
701 data[0] = pBuffer;
702 data[1] = 4;
703 buffer0.memory().unmap();
704 err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
705 ASSERT_VK_SUCCESS(err);
706 err = vk::QueueWaitIdle(m_device->m_queue);
707 ASSERT_VK_SUCCESS(err);
708 m_errorMonitor->VerifyNotFound();
709
710 if (mesh_shader_supported) {
711 const unsigned push_constant_range_count = 1;
712 VkPushConstantRange push_constant_ranges[push_constant_range_count] = {};
713 push_constant_ranges[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
714 push_constant_ranges[0].offset = 0;
715 push_constant_ranges[0].size = 2 * sizeof(VkDeviceAddress);
716
717 VkPipelineLayout mesh_pipeline_layout;
718 VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo[1] = {};
719 pipelineLayoutCreateInfo[0].sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
720 pipelineLayoutCreateInfo[0].pNext = NULL;
721 pipelineLayoutCreateInfo[0].pushConstantRangeCount = push_constant_range_count;
722 pipelineLayoutCreateInfo[0].pPushConstantRanges = push_constant_ranges;
723 pipelineLayoutCreateInfo[0].setLayoutCount = 0;
724 pipelineLayoutCreateInfo[0].pSetLayouts = nullptr;
725 vk::CreatePipelineLayout(m_device->handle(), pipelineLayoutCreateInfo, NULL, &mesh_pipeline_layout);
726
727 char const *mesh_shader_source =
728 "#version 460\n"
729 "#extension GL_NV_mesh_shader : require\n"
730 "#extension GL_EXT_buffer_reference : enable\n"
731 "layout(buffer_reference, buffer_reference_align = 16) buffer bufStruct;\n"
732 "layout(push_constant) uniform ufoo {\n"
733 " bufStruct data;\n"
734 " int nWrites;\n"
735 "} u_info;\n"
736 "layout(buffer_reference, std140) buffer bufStruct {\n"
737 " int a[4];\n"
738 "};\n"
739
740 "layout(local_size_x = 32) in;\n"
741 "layout(max_vertices = 64, max_primitives = 126) out;\n"
742 "layout(triangles) out;\n"
743
744 "uint invocationID = gl_LocalInvocationID.x;\n"
745 "void main() {\n"
746 " if (invocationID == 0) {\n"
747 " for (int i=0; i < u_info.nWrites; ++i) {\n"
748 " u_info.data.a[i] = 0xdeadca71;\n"
749 " }\n"
750 " }\n"
751 "}\n";
752 VkShaderObj ms(m_device, mesh_shader_source, VK_SHADER_STAGE_MESH_BIT_NV, this, "main", true);
753 VkPipelineObj mesh_pipe(m_device);
754 mesh_pipe.AddShader(&ms);
755 mesh_pipe.AddDefaultColorAttachment();
756 err = mesh_pipe.CreateVKPipeline(mesh_pipeline_layout, renderPass());
757 ASSERT_VK_SUCCESS(err);
758 m_commandBuffer->begin(&begin_info);
759 m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
760 vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, mesh_pipe.handle());
761 VkDeviceAddress pushConstants[2] = {};
762 pushConstants[0] = pBuffer;
763 pushConstants[1] = 5;
764 vk::CmdPushConstants(m_commandBuffer->handle(), mesh_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pushConstants),
765 pushConstants);
766 vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
767 vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors);
768 vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
769 vk::CmdEndRenderPass(m_commandBuffer->handle());
770 m_commandBuffer->end();
771
772 m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "access out of bounds");
773 err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
774 ASSERT_VK_SUCCESS(err);
775 err = vk::QueueWaitIdle(m_device->m_queue);
776 ASSERT_VK_SUCCESS(err);
777 m_errorMonitor->VerifyFound();
778 }
779
780 vk::DestroyBuffer(m_device->handle(), buffer1, NULL);
781 vk::FreeMemory(m_device->handle(), buffer_mem, NULL);
782 }
783
// Thin wrapper: the shared ray-tracing OOB test body is parameterized on
// whether GPU-assisted validation is requested; `true` selects the GPU-AV path.
TEST_F(VkGpuAssistedLayerTest, GpuValidationArrayOOBRayTracingShaders) {
    TEST_DESCRIPTION(
        "GPU validation: Verify detection of out-of-bounds descriptor array indexing and use of uninitialized descriptors for "
        "ray tracing shaders using gpu assisted validation.");  // fixed typo: "assited" -> "assisted"
    OOBRayTracingShadersTestBody(true);
}
790
// Verifies that GPU-assisted validation reports a bad bottom-level handle
// embedded in the instance data of a top-level acceleration structure build.
TEST_F(VkGpuAssistedLayerTest, GpuBuildAccelerationStructureValidationInvalidHandle) {
    TEST_DESCRIPTION(
        "Acceleration structure gpu validation should report an invalid handle when trying to build a top level "
        "acceleration structure with an invalid handle for a bottom level acceleration structure.");

    // Framework init can fail (e.g. ray tracing not supported); bail out quietly.
    if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
                                        /*need_gpu_validation=*/true)) {
        return;
    }

    // NV ray tracing entry points are extension functions; resolve at runtime.
    PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
        reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
            vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
    assert(vkCmdBuildAccelerationStructureNV != nullptr);

    VkBufferObj vbo;
    VkBufferObj ibo;
    VkGeometryNV geometry;
    GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);

    // Top-level AS referencing a single instance (no geometries at this level).
    VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
    top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
    top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
    top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
    top_level_as_create_info.info.instanceCount = 1;
    top_level_as_create_info.info.geometryCount = 0;

    VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);

    // Mirrors the instance layout consumed by vkCmdBuildAccelerationStructureNV
    // (VkAccelerationStructureInstanceKHR equivalent for the NV extension).
    struct VkGeometryInstanceNV {
        float transform[12];
        uint32_t instanceCustomIndex : 24;
        uint32_t mask : 8;
        uint32_t instanceOffset : 24;
        uint32_t flags : 8;
        uint64_t accelerationStructureHandle;
    };

    // Identity transform; the accelerationStructureHandle below was never
    // obtained from a real acceleration structure, which is the defect under test.
    VkGeometryInstanceNV instance = {
        {
            // clang-format off
            1.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 1.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f,
            // clang-format on
        },
        0,
        0xFF,
        0,
        VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
        1234567890,  // invalid
    };

    // Host-visible instance buffer the TLAS build reads its instances from.
    VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
    VkBufferObj instance_buffer;
    instance_buffer.init(*m_device, instance_buffer_size,
                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                         VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);

    uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
    std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
    instance_buffer.memory().unmap();

    // Creating the TLAS object itself should not raise any validation error.
    VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
    m_errorMonitor->VerifyNotFound();

    VkBufferObj top_level_as_scratch;
    top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);

    // Record the TLAS build that consumes the bogus instance handle.
    VkCommandBufferObj command_buffer(m_device, &command_pool);
    command_buffer.begin();
    vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
                                      VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
    command_buffer.end();

    // GPU-assisted validation only reports at submit/idle time, so arm the
    // monitor before submitting and wait for the queue to drain.
    m_errorMonitor->SetDesiredFailureMsg(
        kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &command_buffer.handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vk::QueueWaitIdle(m_device->m_queue);
    m_errorMonitor->VerifyFound();
}
877
// Variant of the invalid-handle test: the bottom-level handle is real (taken
// from a created BLAS) but that BLAS is never built, so a TLAS build that
// references it must still be reported as using an invalid handle.
TEST_F(VkGpuAssistedLayerTest, GpuBuildAccelerationStructureValidationBottomLevelNotYetBuilt) {
    TEST_DESCRIPTION(
        "Acceleration structure gpu validation should report an invalid handle when trying to build a top level "
        "acceleration structure with a handle for a bottom level acceleration structure that has not yet been built.");

    // Framework init can fail (e.g. ray tracing not supported); bail out quietly.
    if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
                                        /*need_gpu_validation=*/true)) {
        return;
    }

    // NV ray tracing entry points are extension functions; resolve at runtime.
    PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
        reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
            vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
    assert(vkCmdBuildAccelerationStructureNV != nullptr);

    VkBufferObj vbo;
    VkBufferObj ibo;
    VkGeometryNV geometry;
    GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);

    // Bottom-level AS over the simple geometry above; created but never built.
    VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
    bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
    bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
    bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
    bot_level_as_create_info.info.instanceCount = 0;
    bot_level_as_create_info.info.geometryCount = 1;
    bot_level_as_create_info.info.pGeometries = &geometry;

    // Top-level AS referencing a single instance (no geometries at this level).
    VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
    top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
    top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
    top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
    top_level_as_create_info.info.instanceCount = 1;
    top_level_as_create_info.info.geometryCount = 0;

    VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);

    // Mirrors the instance layout consumed by vkCmdBuildAccelerationStructureNV.
    struct VkGeometryInstanceNV {
        float transform[12];
        uint32_t instanceCustomIndex : 24;
        uint32_t mask : 8;
        uint32_t instanceOffset : 24;
        uint32_t flags : 8;
        uint64_t accelerationStructureHandle;
    };

    // Create (but do NOT build) the BLAS; creation alone must not raise errors.
    VkAccelerationStructureObj bot_level_as_never_built(*m_device, bot_level_as_create_info);
    m_errorMonitor->VerifyNotFound();

    // Identity transform; the handle comes from the unbuilt BLAS, which is the
    // condition under test.
    VkGeometryInstanceNV instance = {
        {
            // clang-format off
            1.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 1.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f,
            // clang-format on
        },
        0,
        0xFF,
        0,
        VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
        bot_level_as_never_built.opaque_handle(),
    };

    // Host-visible instance buffer the TLAS build reads its instances from.
    VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
    VkBufferObj instance_buffer;
    instance_buffer.init(*m_device, instance_buffer_size,
                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                         VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);

    uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
    std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
    instance_buffer.memory().unmap();

    // Creating the TLAS object itself should not raise any validation error.
    VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
    m_errorMonitor->VerifyNotFound();

    VkBufferObj top_level_as_scratch;
    top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);

    // Record the TLAS build that references the never-built BLAS handle.
    VkCommandBufferObj command_buffer(m_device, &command_pool);
    command_buffer.begin();
    vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
                                      VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
    command_buffer.end();

    // GPU-assisted validation only reports at submit/idle time, so arm the
    // monitor before submitting and wait for the queue to drain.
    m_errorMonitor->SetDesiredFailureMsg(
        kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &command_buffer.handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vk::QueueWaitIdle(m_device->m_queue);
    m_errorMonitor->VerifyFound();
}
975
// Variant of the invalid-handle test: the bottom-level AS is created and
// successfully built, but destroyed before the top-level build executes, so a
// TLAS build referencing its (stale) handle must be reported.
TEST_F(VkGpuAssistedLayerTest, GpuBuildAccelerationStructureValidationBottomLevelDestroyed) {
    TEST_DESCRIPTION(
        "Acceleration structure gpu validation should report an invalid handle when trying to build a top level "
        "acceleration structure with a handle for a destroyed bottom level acceleration structure.");

    // Framework init can fail (e.g. ray tracing not supported); bail out quietly.
    if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
                                        /*need_gpu_validation=*/true)) {
        return;
    }

    // NV ray tracing entry points are extension functions; resolve at runtime.
    PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
        reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
            vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
    assert(vkCmdBuildAccelerationStructureNV != nullptr);

    VkBufferObj vbo;
    VkBufferObj ibo;
    VkGeometryNV geometry;
    GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);

    // Bottom-level AS over the simple geometry above.
    VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
    bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
    bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
    bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
    bot_level_as_create_info.info.instanceCount = 0;
    bot_level_as_create_info.info.geometryCount = 1;
    bot_level_as_create_info.info.pGeometries = &geometry;

    // Top-level AS referencing a single instance (no geometries at this level).
    VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
    top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
    top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
    top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
    top_level_as_create_info.info.instanceCount = 1;
    top_level_as_create_info.info.geometryCount = 0;

    VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);

    // Mirrors the instance layout consumed by vkCmdBuildAccelerationStructureNV.
    struct VkGeometryInstanceNV {
        float transform[12];
        uint32_t instanceCustomIndex : 24;
        uint32_t mask : 8;
        uint32_t instanceOffset : 24;
        uint32_t flags : 8;
        uint64_t accelerationStructureHandle;
    };

    // Build the BLAS inside a scope so it is destroyed when the scope ends;
    // only its opaque handle survives for use in the TLAS instance data.
    uint64_t destroyed_bot_level_as_handle = 0;
    {
        VkAccelerationStructureObj destroyed_bot_level_as(*m_device, bot_level_as_create_info);
        m_errorMonitor->VerifyNotFound();

        destroyed_bot_level_as_handle = destroyed_bot_level_as.opaque_handle();

        VkBufferObj bot_level_as_scratch;
        destroyed_bot_level_as.create_scratch_buffer(*m_device, &bot_level_as_scratch);

        // Build the BLAS for real so the handle was valid at some point.
        VkCommandBufferObj command_buffer(m_device, &command_pool);
        command_buffer.begin();
        vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
                                          destroyed_bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
        command_buffer.end();

        // The BLAS build itself must complete without validation errors.
        VkSubmitInfo submit_info = {};
        submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submit_info.commandBufferCount = 1;
        submit_info.pCommandBuffers = &command_buffer.handle();
        vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
        vk::QueueWaitIdle(m_device->m_queue);
        m_errorMonitor->VerifyNotFound();

        // vk::DestroyAccelerationStructureNV called on destroyed_bot_level_as during destruction.
    }

    // Identity transform; the handle refers to the BLAS destroyed above, which
    // is the condition under test.
    VkGeometryInstanceNV instance = {
        {
            // clang-format off
            1.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 1.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f,
            // clang-format on
        },
        0,
        0xFF,
        0,
        VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
        destroyed_bot_level_as_handle,
    };

    // Host-visible instance buffer the TLAS build reads its instances from.
    VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
    VkBufferObj instance_buffer;
    instance_buffer.init(*m_device, instance_buffer_size,
                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                         VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);

    uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
    std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
    instance_buffer.memory().unmap();

    // Creating the TLAS object itself should not raise any validation error.
    VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
    m_errorMonitor->VerifyNotFound();

    VkBufferObj top_level_as_scratch;
    top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);

    // Record the TLAS build that references the destroyed BLAS handle.
    VkCommandBufferObj command_buffer(m_device, &command_pool);
    command_buffer.begin();
    vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
                                      VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
    command_buffer.end();

    // GPU-assisted validation only reports at submit/idle time, so arm the
    // monitor before submitting and wait for the queue to drain.
    m_errorMonitor->SetDesiredFailureMsg(
        kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &command_buffer.handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vk::QueueWaitIdle(m_device->m_queue);
    m_errorMonitor->VerifyFound();
}
1097
// Binds a full set of compute state (pipeline, push constants, a push
// descriptor set, and regular descriptor sets), then records an AS build with
// an invalid instance handle so GPU-assisted validation is forced to do its
// own work on the queue, and finally dispatches a compute shader that echoes
// the bound state into an output buffer. If GPU-AV failed to restore the
// user's compute state after its intervention, the echoed values would differ.
TEST_F(VkGpuAssistedLayerTest, GpuBuildAccelerationStructureValidationRestoresState) {
    TEST_DESCRIPTION("Validate that acceleration structure gpu validation correctly restores compute state.");

    // Needs push descriptors in addition to ray tracing + GPU validation.
    if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
                                        /*need_gpu_validation=*/true, /*need_push_descriptors=*/true)) {
        return;
    }

    // Extension entry points are resolved at runtime.
    PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
        reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
            vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
    assert(vkCmdBuildAccelerationStructureNV != nullptr);

    PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
        (PFN_vkCmdPushDescriptorSetKHR)vk::GetDeviceProcAddr(m_device->handle(), "vkCmdPushDescriptorSetKHR");
    assert(vkCmdPushDescriptorSetKHR != nullptr);

    VkBufferObj vbo;
    VkBufferObj ibo;
    VkGeometryNV geometry;
    GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);

    // Top-level AS referencing a single instance (no geometries at this level).
    VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
    top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
    top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
    top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
    top_level_as_create_info.info.instanceCount = 1;
    top_level_as_create_info.info.geometryCount = 0;

    VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);

    // Mirrors the instance layout consumed by vkCmdBuildAccelerationStructureNV.
    struct VkGeometryInstanceNV {
        float transform[12];
        uint32_t instanceCustomIndex : 24;
        uint32_t mask : 8;
        uint32_t instanceOffset : 24;
        uint32_t flags : 8;
        uint64_t accelerationStructureHandle;
    };

    // Deliberately bogus BLAS handle: guarantees GPU-AV detects (and therefore
    // actually processes) this build, exercising its state save/restore path.
    VkGeometryInstanceNV instance = {
        {
            // clang-format off
            1.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 1.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f,
            // clang-format on
        },
        0,
        0xFF,
        0,
        VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
        1234567,
    };

    // Host-visible instance buffer the TLAS build reads its instances from.
    VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
    VkBufferObj instance_buffer;
    instance_buffer.init(*m_device, instance_buffer_size,
                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                         VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);

    uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
    std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
    instance_buffer.memory().unmap();

    // Creating the TLAS object itself should not raise any validation error.
    VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
    m_errorMonitor->VerifyNotFound();

    VkBufferObj top_level_as_scratch;
    top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);

    // Host-side mirror of the shader's ComputeOutputBuffer (set 2, binding 0).
    struct ComputeOutput {
        uint32_t push_constant_value;
        uint32_t push_descriptor_value;
        uint32_t normal_descriptor_value;
    };

    // Input buffer bound via vkCmdPushDescriptorSetKHR (set 0).
    VkBufferObj push_descriptor_buffer;
    push_descriptor_buffer.init(*m_device, 4, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);

    // Input buffer bound via a regular descriptor set (set 1).
    VkBufferObj normal_descriptor_buffer;
    normal_descriptor_buffer.init(*m_device, 4, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                  VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);

    // Output buffer the compute shader writes its echoed state into (set 2).
    VkDeviceSize output_descriptor_buffer_size = static_cast<VkDeviceSize>(sizeof(ComputeOutput));
    VkBufferObj output_descriptor_buffer;
    output_descriptor_buffer.init(*m_device, output_descriptor_buffer_size,
                                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                  VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);

    // Shader simply copies the push constant and both descriptor inputs to the
    // output buffer — whatever is bound at dispatch time is what we read back.
    const std::string cs_source = R"glsl(#version 450
        layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

        layout(push_constant) uniform PushConstants { uint value; } push_constant;
        layout(set = 0, binding = 0, std430) buffer PushDescriptorBuffer { uint value; } push_descriptor;
        layout(set = 1, binding = 0, std430) buffer NormalDescriptorBuffer { uint value; } normal_descriptor;

        layout(set = 2, binding = 0, std430) buffer ComputeOutputBuffer {
            uint push_constant_value;
            uint push_descriptor_value;
            uint normal_descriptor_value;
        } compute_output;

        void main() {
            compute_output.push_constant_value = push_constant.value;
            compute_output.push_descriptor_value = push_descriptor.value;
            compute_output.normal_descriptor_value = normal_descriptor.value;
        }
        )glsl";
    VkShaderObj cs(m_device, cs_source.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, this);

    // Set 0 is a push-descriptor layout; sets 1 and 2 are conventional.
    OneOffDescriptorSet push_descriptor_set(m_device,
                                            {
                                                {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                            },
                                            VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
    OneOffDescriptorSet normal_descriptor_set(m_device,
                                              {
                                                  {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                              });
    OneOffDescriptorSet output_descriptor_set(m_device,
                                              {
                                                  {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                              });

    // Single 4-byte push constant consumed by the compute stage.
    VkPushConstantRange push_constant_range = {};
    push_constant_range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
    push_constant_range.size = 4;
    push_constant_range.offset = 0;

    const VkPipelineLayoutObj compute_pipeline_layout(m_device,
                                                      {
                                                          &push_descriptor_set.layout_,
                                                          &normal_descriptor_set.layout_,
                                                          &output_descriptor_set.layout_,
                                                      },
                                                      {push_constant_range});

    VkComputePipelineCreateInfo compute_pipeline_ci = {};
    compute_pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
    compute_pipeline_ci.layout = compute_pipeline_layout.handle();
    compute_pipeline_ci.stage = cs.GetStageCreateInfo();

    VkPipeline compute_pipeline;
    ASSERT_VK_SUCCESS(
        vk::CreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &compute_pipeline_ci, nullptr, &compute_pipeline));

    normal_descriptor_set.WriteDescriptorBufferInfo(0, normal_descriptor_buffer.handle(), 4, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    normal_descriptor_set.UpdateDescriptorSets();

    output_descriptor_set.WriteDescriptorBufferInfo(0, output_descriptor_buffer.handle(), output_descriptor_buffer_size,
                                                    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    output_descriptor_set.UpdateDescriptorSets();

    // Set input data — distinct sentinels so each state slot is identifiable.
    const uint32_t push_constant_value = 1234567890;
    const uint32_t push_descriptor_value = 98765432;
    const uint32_t normal_descriptor_value = 1111111;

    uint32_t *mapped_push_descriptor_buffer_data = (uint32_t *)push_descriptor_buffer.memory().map();
    *mapped_push_descriptor_buffer_data = push_descriptor_value;
    push_descriptor_buffer.memory().unmap();

    uint32_t *mapped_normal_descriptor_buffer_data = (uint32_t *)normal_descriptor_buffer.memory().map();
    *mapped_normal_descriptor_buffer_data = normal_descriptor_value;
    normal_descriptor_buffer.memory().unmap();

    // Zero the output so stale data can't mask a failed write.
    ComputeOutput *mapped_output_buffer_data = (ComputeOutput *)output_descriptor_buffer.memory().map();
    mapped_output_buffer_data->push_constant_value = 0;
    mapped_output_buffer_data->push_descriptor_value = 0;
    mapped_output_buffer_data->normal_descriptor_value = 0;
    output_descriptor_buffer.memory().unmap();

    // Write for set 0 delivered via vkCmdPushDescriptorSetKHR (dstSet is
    // ignored for push descriptors, so it is left zero).
    VkDescriptorBufferInfo push_descriptor_buffer_info = {};
    push_descriptor_buffer_info.buffer = push_descriptor_buffer.handle();
    push_descriptor_buffer_info.offset = 0;
    push_descriptor_buffer_info.range = 4;
    VkWriteDescriptorSet push_descriptor_set_write = {};
    push_descriptor_set_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    push_descriptor_set_write.descriptorCount = 1;
    push_descriptor_set_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    push_descriptor_set_write.dstBinding = 0;
    push_descriptor_set_write.pBufferInfo = &push_descriptor_buffer_info;

    // Bind all compute state FIRST, then record the AS build, then dispatch —
    // so the dispatch observes whatever state survives the build's validation.
    VkCommandBufferObj command_buffer(m_device, &command_pool);
    command_buffer.begin();
    vk::CmdBindPipeline(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);
    vk::CmdPushConstants(command_buffer.handle(), compute_pipeline_layout.handle(), VK_SHADER_STAGE_COMPUTE_BIT, 0, 4,
                         &push_constant_value);
    vkCmdPushDescriptorSetKHR(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout.handle(), 0, 1,
                              &push_descriptor_set_write);
    vk::CmdBindDescriptorSets(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout.handle(), 1, 1,
                              &normal_descriptor_set.set_, 0, nullptr);
    vk::CmdBindDescriptorSets(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout.handle(), 2, 1,
                              &output_descriptor_set.set_, 0, nullptr);

    vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
                                      VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);

    vk::CmdDispatch(command_buffer.handle(), 1, 1, 1);
    command_buffer.end();

    // The invalid handle must be reported — proving GPU-AV actually ran.
    m_errorMonitor->SetDesiredFailureMsg(
        kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &command_buffer.handle();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vk::QueueWaitIdle(m_device->m_queue);

    m_errorMonitor->VerifyFound();

    // If state restoration worked, the dispatch saw the original bindings and
    // the sentinels round-trip unchanged.
    mapped_output_buffer_data = (ComputeOutput *)output_descriptor_buffer.memory().map();
    EXPECT_EQ(mapped_output_buffer_data->push_constant_value, push_constant_value);
    EXPECT_EQ(mapped_output_buffer_data->push_descriptor_value, push_descriptor_value);
    EXPECT_EQ(mapped_output_buffer_data->normal_descriptor_value, normal_descriptor_value);
    output_descriptor_buffer.memory().unmap();

    // Clean up — the raw VkPipeline is not RAII-managed.
    vk::DestroyPipeline(m_device->device(), compute_pipeline, nullptr);
}
1322
TEST_F(VkGpuAssistedLayerTest,GpuValidationInlineUniformBlockAndMiscGpu)1323 TEST_F(VkGpuAssistedLayerTest, GpuValidationInlineUniformBlockAndMiscGpu) {
1324 TEST_DESCRIPTION(
1325 "GPU validation: Make sure inline uniform blocks don't generate false validation errors, verify reserved descriptor slot "
1326 "and verify pipeline recovery");
1327 SetTargetApiVersion(VK_API_VERSION_1_1);
1328 m_errorMonitor->ExpectSuccess();
1329 VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT,
1330 VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT};
1331 VkValidationFeaturesEXT features = {};
1332 features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
1333 features.enabledValidationFeatureCount = 2;
1334 features.pEnabledValidationFeatures = enables;
1335 bool descriptor_indexing = CheckDescriptorIndexingSupportAndInitFramework(this, m_instance_extension_names,
1336 m_device_extension_names, &features, m_errorMonitor);
1337 if (IsPlatform(kMockICD) || DeviceSimulation()) {
1338 printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
1339 return;
1340 }
1341 VkPhysicalDeviceFeatures2KHR features2 = {};
1342 auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
1343 auto inline_uniform_block_features = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(&indexing_features);
1344 bool inline_uniform_block = DeviceExtensionSupported(gpu(), nullptr, VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME);
1345 if (!(descriptor_indexing && inline_uniform_block)) {
1346 printf("Descriptor indexing and/or inline uniform block not supported Skipping test\n");
1347 return;
1348 }
1349 m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
1350 m_device_extension_names.push_back(VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME);
1351 PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
1352 (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
1353 ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
1354
1355 features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&inline_uniform_block_features);
1356 vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
1357 if (!indexing_features.descriptorBindingPartiallyBound || !inline_uniform_block_features.inlineUniformBlock) {
1358 printf("Not all features supported, skipping test\n");
1359 return;
1360 }
1361 auto inline_uniform_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>();
1362 auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_props);
1363 vk::GetPhysicalDeviceProperties2(gpu(), &prop2);
1364
1365 VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
1366 ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, pool_flags));
1367 if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
1368 printf("%s GPU-Assisted validation test requires Vulkan 1.1+.\n", kSkipPrefix);
1369 return;
1370 }
1371 auto c_queue = m_device->GetDefaultComputeQueue();
1372 if (nullptr == c_queue) {
1373 printf("Compute not supported, skipping test\n");
1374 return;
1375 }
1376
1377 uint32_t qfi = 0;
1378 VkBufferCreateInfo bci = {};
1379 bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1380 bci.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1381 bci.size = 4;
1382 bci.queueFamilyIndexCount = 1;
1383 bci.pQueueFamilyIndices = &qfi;
1384 VkBufferObj buffer0;
1385 VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1386 buffer0.init(*m_device, bci, mem_props);
1387
1388 VkDescriptorBindingFlagsEXT ds_binding_flags[2] = {};
1389 ds_binding_flags[1] = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT;
1390 VkDescriptorSetLayoutBindingFlagsCreateInfoEXT layout_createinfo_binding_flags[1] = {};
1391 layout_createinfo_binding_flags[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT;
1392 layout_createinfo_binding_flags[0].pNext = NULL;
1393 layout_createinfo_binding_flags[0].bindingCount = 2;
1394 layout_createinfo_binding_flags[0].pBindingFlags = ds_binding_flags;
1395
1396 OneOffDescriptorSet descriptor_set(m_device,
1397 {
1398 {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
1399 {1, VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, 20, VK_SHADER_STAGE_ALL,
1400 nullptr}, // 16 bytes for ivec4, 4 more for int
1401 },
1402 0, layout_createinfo_binding_flags, 0);
1403 const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});
1404
1405 VkDescriptorBufferInfo buffer_info[1] = {};
1406 buffer_info[0].buffer = buffer0.handle();
1407 buffer_info[0].offset = 0;
1408 buffer_info[0].range = sizeof(uint32_t);
1409
1410 const uint32_t test_data = 0xdeadca7;
1411 VkWriteDescriptorSetInlineUniformBlockEXT write_inline_uniform = {};
1412 write_inline_uniform.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT;
1413 write_inline_uniform.dataSize = 4;
1414 write_inline_uniform.pData = &test_data;
1415
1416 VkWriteDescriptorSet descriptor_writes[2] = {};
1417 descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1418 descriptor_writes[0].dstSet = descriptor_set.set_;
1419 descriptor_writes[0].dstBinding = 0;
1420 descriptor_writes[0].descriptorCount = 1;
1421 descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
1422 descriptor_writes[0].pBufferInfo = buffer_info;
1423
1424 descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1425 descriptor_writes[1].dstSet = descriptor_set.set_;
1426 descriptor_writes[1].dstBinding = 1;
1427 descriptor_writes[1].dstArrayElement = 16; // Skip first 16 bytes (dummy)
1428 descriptor_writes[1].descriptorCount = 4; // Write 4 bytes to val
1429 descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT;
1430 descriptor_writes[1].pNext = &write_inline_uniform;
1431 vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL);
1432
1433 char const *csSource =
1434 "#version 450\n"
1435 "#extension GL_EXT_nonuniform_qualifier : enable\n "
1436 "layout(set = 0, binding = 0) buffer StorageBuffer { uint index; } u_index;"
1437 "layout(set = 0, binding = 1) uniform inlineubodef { ivec4 dummy; int val; } inlineubo;\n"
1438
1439 "void main() {\n"
1440 " u_index.index = inlineubo.val;\n"
1441 "}\n";
1442
1443 auto shader_module = new VkShaderObj(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
1444
1445 VkPipelineShaderStageCreateInfo stage;
1446 stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1447 stage.pNext = nullptr;
1448 stage.flags = 0;
1449 stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
1450 stage.module = shader_module->handle();
1451 stage.pName = "main";
1452 stage.pSpecializationInfo = nullptr;
1453
1454 // CreateComputePipelines
1455 VkComputePipelineCreateInfo pipeline_info = {};
1456 pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
1457 pipeline_info.pNext = nullptr;
1458 pipeline_info.flags = 0;
1459 pipeline_info.layout = pipeline_layout.handle();
1460 pipeline_info.basePipelineHandle = VK_NULL_HANDLE;
1461 pipeline_info.basePipelineIndex = -1;
1462 pipeline_info.stage = stage;
1463
1464 VkPipeline c_pipeline;
1465 vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &c_pipeline);
1466
1467 m_commandBuffer->begin();
1468 vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, c_pipeline);
1469 vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout.handle(), 0, 1,
1470 &descriptor_set.set_, 0, nullptr);
1471 vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
1472 m_commandBuffer->end();
1473
1474 VkSubmitInfo submit_info = {};
1475 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1476 submit_info.commandBufferCount = 1;
1477 submit_info.pCommandBuffers = &m_commandBuffer->handle();
1478 vk::QueueSubmit(c_queue->handle(), 1, &submit_info, VK_NULL_HANDLE);
1479 vk::QueueWaitIdle(m_device->m_queue);
1480 m_errorMonitor->VerifyNotFound();
1481 vk::DestroyPipeline(m_device->handle(), c_pipeline, NULL);
1482
1483 uint32_t *data = (uint32_t *)buffer0.memory().map();
1484 ASSERT_TRUE(*data = test_data);
1485 *data = 0;
1486 buffer0.memory().unmap();
1487
1488 // Also verify that binding slot reservation is working
1489 VkInstanceCreateInfo inst_info = {};
1490 VkInstance test_inst;
1491 inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
1492 vk::CreateInstance(&inst_info, NULL, &test_inst);
1493 uint32_t gpu_count;
1494 VkPhysicalDevice objs[4];
1495 vk::EnumeratePhysicalDevices(test_inst, &gpu_count, NULL);
1496 if (gpu_count > 4) gpu_count = 4;
1497 vk::EnumeratePhysicalDevices(test_inst, &gpu_count, objs);
1498 VkPhysicalDeviceProperties properties;
1499 vk::GetPhysicalDeviceProperties(objs[0], &properties);
1500 if (m_device->props.limits.maxBoundDescriptorSets != properties.limits.maxBoundDescriptorSets - 1)
1501 m_errorMonitor->SetError("VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT not functioning as expected");
1502 vk::DestroyInstance(test_inst, NULL);
1503
1504 auto set_count = properties.limits.maxBoundDescriptorSets;
1505 // Now be sure that recovery from an unavailable descriptor set works and that uninstrumented shaders are used
1506 VkDescriptorSetLayoutBinding dsl_binding[2] = {};
1507 dsl_binding[0].binding = 0;
1508 dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
1509 dsl_binding[0].descriptorCount = 1;
1510 dsl_binding[0].stageFlags = VK_SHADER_STAGE_ALL;
1511 dsl_binding[1].binding = 1;
1512 dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT;
1513 dsl_binding[1].descriptorCount = 20;
1514 dsl_binding[1].stageFlags = VK_SHADER_STAGE_ALL;
1515 VkDescriptorSetLayout *layouts{new VkDescriptorSetLayout[set_count]{}};
1516 VkDescriptorSetLayoutCreateInfo dsl_create_info = {};
1517 dsl_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
1518 dsl_create_info.pNext = layout_createinfo_binding_flags;
1519 dsl_create_info.pBindings = dsl_binding;
1520 dsl_create_info.bindingCount = 2;
1521 for (uint32_t i = 0; i < set_count; i++) {
1522 vk::CreateDescriptorSetLayout(m_device->handle(), &dsl_create_info, NULL, &layouts[i]);
1523 }
1524 VkPipelineLayoutCreateInfo pl_create_info = {};
1525 VkPipelineLayout pl_layout;
1526 pl_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
1527 pl_create_info.setLayoutCount = set_count;
1528 pl_create_info.pSetLayouts = layouts;
1529 vk::CreatePipelineLayout(m_device->handle(), &pl_create_info, NULL, &pl_layout);
1530 pipeline_info.layout = pl_layout;
1531 vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &c_pipeline);
1532 m_commandBuffer->begin();
1533 vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, c_pipeline);
1534 vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pl_layout, 0, 1, &descriptor_set.set_, 0,
1535 nullptr);
1536 vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
1537 m_commandBuffer->end();
1538 vk::QueueSubmit(c_queue->handle(), 1, &submit_info, VK_NULL_HANDLE);
1539 vk::QueueWaitIdle(m_device->m_queue);
1540 vk::DestroyShaderModule(m_device->handle(), shader_module->handle(), NULL);
1541 vk::DestroyPipelineLayout(m_device->handle(), pl_layout, NULL);
1542 vk::DestroyPipeline(m_device->handle(), c_pipeline, NULL);
1543 for (uint32_t i = 0; i < set_count; i++) {
1544 vk::DestroyDescriptorSetLayout(m_device->handle(), layouts[i], NULL);
1545 }
1546 m_errorMonitor->VerifyNotFound();
1547 data = (uint32_t *)buffer0.memory().map();
1548 if (*data != test_data) m_errorMonitor->SetError("Pipeline recovery when resources unavailable not functioning as expected");
1549 buffer0.memory().unmap();
1550 delete[] layouts;
1551 }
1552
TEST_F(VkGpuAssistedLayerTest, GpuValidationAbort) {
    TEST_DESCRIPTION("GPU validation: Verify that aborting GPU-AV is safe.");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    InitGpuAssistedFramework(false);
    if (IsPlatform(kNexusPlayer)) {
        printf("%s This test should not run on Nexus Player\n", kSkipPrefix);
        return;
    }
    // Look up the device_profile_api entry points used to override the features the driver reports.
    auto fp_set_features =
        (PFN_vkSetPhysicalDeviceFeaturesEXT)vk::GetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFeaturesEXT");
    auto fp_get_original_features =
        (PFN_vkGetOriginalPhysicalDeviceFeaturesEXT)vk::GetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceFeaturesEXT");

    if (!fp_set_features || !fp_get_original_features) {
        printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
        return;
    }

    VkPhysicalDeviceFeatures gpu_features = {};
    fp_get_original_features(gpu(), &gpu_features);

    // Disable features necessary for GPU-AV so initialization aborts
    gpu_features.vertexPipelineStoresAndAtomics = false;
    gpu_features.fragmentStoresAndAtomics = false;
    fp_set_features(gpu(), gpu_features);

    // GPU-AV should notice the missing features and report that it disabled itself, without crashing.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "GPU-Assisted Validation disabled");
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->VerifyFound();
}
1583
TEST_F(VkGpuAssistedLayerTest, ValidationFeatures) {
    TEST_DESCRIPTION("Validate Validation Features");
    SetTargetApiVersion(VK_API_VERSION_1_1);

    // Requesting the binding-slot reservation without also enabling GPU-AV itself is invalid.
    VkValidationFeatureEnableEXT reserve_slot_enable[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT};
    VkValidationFeaturesEXT features = {};
    features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
    features.pEnabledValidationFeatures = reserve_slot_enable;
    features.enabledValidationFeatureCount = 1;

    // Chain the validation features onto the standard instance create info.
    auto ici = GetInstanceCreateInfo();
    features.pNext = ici.pNext;
    ici.pNext = &features;
    VkInstance instance;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967");
    vk::CreateInstance(&ici, nullptr, &instance);
    m_errorMonitor->VerifyFound();

    // GPU-AV and debug printf are mutually exclusive; enabling both must be rejected.
    VkValidationFeatureEnableEXT printf_enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT,
                                                     VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT};
    features.enabledValidationFeatureCount = 2;
    features.pEnabledValidationFeatures = printf_enables;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968");
    vk::CreateInstance(&ici, nullptr, &instance);
    m_errorMonitor->VerifyFound();
}
1609
InitDebugPrintfFramework()1610 void VkDebugPrintfTest::InitDebugPrintfFramework() {
1611 VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT};
1612 VkValidationFeatureDisableEXT disables[] = {
1613 VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT, VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT,
1614 VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT, VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT};
1615 VkValidationFeaturesEXT features = {};
1616 features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
1617 features.enabledValidationFeatureCount = 1;
1618 features.disabledValidationFeatureCount = 4;
1619 features.pEnabledValidationFeatures = enables;
1620 features.pDisabledValidationFeatures = disables;
1621
1622 InitFramework(m_errorMonitor, &features);
1623 }
1624
// Exercises every debugPrintfEXT format specifier: a uniform buffer selects one
// case of a switch in the vertex shader, and the error monitor verifies the
// exact formatted message is delivered through the debug stream.
TEST_F(VkDebugPrintfTest, GpuDebugPrintf) {
    TEST_DESCRIPTION("Verify that calls to debugPrintfEXT are received in debug stream");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    // debugPrintfEXT instrumentation requires the non-semantic-info SPIR-V extension.
    m_device_extension_names.push_back(VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME);
    InitDebugPrintfFramework();
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
               VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s GPU-Assisted printf test requires Vulkan 1.1+.\n", kSkipPrefix);
        return;
    }
    auto features = m_device->phy().features();
    if (!features.vertexPipelineStoresAndAtomics || !features.fragmentStoresAndAtomics) {
        printf("%s GPU-Assisted printf test requires vertexPipelineStoresAndAtomics and fragmentStoresAndAtomics.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        printf("%s GPU-Assisted printf test requires a driver that can draw.\n", kSkipPrefix);
        return;
    }
    // Make a uniform buffer to be passed to the shader that contains the test number
    uint32_t qfi = 0;
    VkBufferCreateInfo bci = {};
    bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    bci.size = 8;
    bci.queueFamilyIndexCount = 1;
    bci.pQueueFamilyIndices = &qfi;
    VkBufferObj buffer0;
    VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    buffer0.init(*m_device, bci, mem_props);
    OneOffDescriptorSet descriptor_set(m_device, {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}});

    const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});
    // NOTE(review): only element [0] of this two-element array is filled in and referenced below.
    VkDescriptorBufferInfo buffer_info[2] = {};
    buffer_info[0].buffer = buffer0.handle();
    buffer_info[0].offset = 0;
    buffer_info[0].range = sizeof(uint32_t);

    VkWriteDescriptorSet descriptor_writes[1] = {};
    descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_writes[0].dstSet = descriptor_set.set_;
    descriptor_writes[0].dstBinding = 0;
    descriptor_writes[0].descriptorCount = 1;
    descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_writes[0].pBufferInfo = buffer_info;
    vk::UpdateDescriptorSets(m_device->device(), 1, descriptor_writes, 0, NULL);

    // Vertex shader with one debugPrintfEXT call per format-specifier test case;
    // the case executed is chosen by u_info.whichtest, written from the host below.
    char const *shader_source =
        "#version 450\n"
        "#extension GL_EXT_debug_printf : enable\n"
        "layout(set = 0, binding = 0) uniform ufoo {\n"
        "    int whichtest;\n"
        "} u_info;\n"
        "void main() {\n"
        "    float myfloat = 3.1415f;\n"
        "    int foo = -135;\n"
        "    if (gl_VertexIndex == 0) {\n"
        "        switch(u_info.whichtest) {\n"
        "            case 0:\n"
        "                debugPrintfEXT(\"Here are two float values %f, %f\", 1.0, myfloat);\n"
        "                break;\n"
        "            case 1:\n"
        "                debugPrintfEXT(\"Here's a smaller float value %1.2f\", myfloat);\n"
        "                break;\n"
        "            case 2:\n"
        "                debugPrintfEXT(\"Here's an integer %i with text before and after it\", foo);\n"
        "                break;\n"
        "            case 3:\n"
        "                foo = 256;\n"
        "                debugPrintfEXT(\"Here's an integer in octal %o and hex 0x%x\", foo, foo);\n"
        "                break;\n"
        "            case 4:\n"
        "                debugPrintfEXT(\"%d is a negative integer\", foo);\n"
        "                break;\n"
        "            case 5:\n"
        "                vec4 floatvec = vec4(1.2f, 2.2f, 3.2f, 4.2f);\n"
        "                debugPrintfEXT(\"Here's a vector of floats %1.2v4f\", floatvec);\n"
        "                break;\n"
        "            case 6:\n"
        "                debugPrintfEXT(\"Here's a float in sn %e\", myfloat);\n"
        "                break;\n"
        "            case 7:\n"
        "                debugPrintfEXT(\"Here's a float in sn %1.2e\", myfloat);\n"
        "                break;\n"
        "            case 8:\n"
        "                debugPrintfEXT(\"Here's a float in shortest %g\", myfloat);\n"
        "                break;\n"
        "            case 9:\n"
        "                debugPrintfEXT(\"Here's a float in hex %1.9a\", myfloat);\n"
        "                break;\n"
        "            case 10:\n"
        "                debugPrintfEXT(\"First printf with a %% and no value\");\n"
        "                debugPrintfEXT(\"Second printf with a value %i\", foo);\n"
        "                break;\n"
        "        }\n"
        "    }\n"
        "    gl_Position = vec4(0.0, 0.0, 0.0, 0.0);\n"
        "}\n";
    // messages[i] is the exact formatted output expected from shader case i.
    std::vector<char const *> messages;
    messages.push_back("Here are two float values 1.000000, 3.141500");
    messages.push_back("Here's a smaller float value 3.14");
    messages.push_back("Here's an integer -135 with text before and after it");
    messages.push_back("Here's an integer in octal 400 and hex 0x100");
    messages.push_back("-135 is a negative integer");
    messages.push_back("Here's a vector of floats 1.20, 2.20, 3.20, 4.20");
    messages.push_back("Here's a float in sn 3.141500e+00");
    messages.push_back("Here's a float in sn 3.14e+00");
    messages.push_back("Here's a float in shortest 3.1415");
    messages.push_back("Here's a float in hex 0x1.921cac000p+1");
    // Two error messages have to be last in the vector
    messages.push_back("First printf with a % and no value");
    messages.push_back("Second printf with a value -135");
    VkShaderObj vs(m_device, shader_source, VK_SHADER_STAGE_VERTEX_BIT, this, "main", true);

    VkViewport viewport = m_viewports[0];
    VkRect2D scissors = m_scissors[0];

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddDefaultColorAttachment();
    VkResult err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
    ASSERT_VK_SUCCESS(err);

    VkCommandBufferBeginInfo begin_info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.pInheritanceInfo = &hinfo;

    // Record the draw once; the same command buffer is resubmitted for each test case.
    m_commandBuffer->begin(&begin_info);
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                              &descriptor_set.set_, 0, nullptr);
    vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors);
    vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();

    for (uint32_t i = 0; i < messages.size(); i++) {
        // Write the case index into the uniform buffer; a 64-bit store into the
        // 8-byte buffer whose low 32 bits the shader reads as whichtest
        // (NOTE(review): assumes little-endian host/device — appears intentional here).
        VkDeviceAddress *data = (VkDeviceAddress *)buffer0.memory().map();
        data[0] = i;
        buffer0.memory().unmap();
        m_errorMonitor->SetDesiredFailureMsg(kInformationBit, messages[i]);
        if (10 == i) {
            // Case 10 emits two printf messages in one draw, so expect both and skip ahead.
            m_errorMonitor->SetDesiredFailureMsg(kInformationBit, messages[i + 1]);
            i++;
        }
        err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
        ASSERT_VK_SUCCESS(err);
        err = vk::QueueWaitIdle(m_device->m_queue);
        ASSERT_VK_SUCCESS(err);
        m_errorMonitor->VerifyFound();
    }

    // If 64-bit integers are available, also verify the %ul specifiers (scalar and vector).
    if (features.shaderInt64) {
        char const *shader_source_int64 =
            "#version 450\n"
            "#extension GL_EXT_debug_printf : enable\n"
            "#extension GL_ARB_gpu_shader_int64 : enable\n"
            "layout(set = 0, binding = 0) uniform ufoo {\n"
            "    int whichtest;\n"
            "} u_info;\n"
            "void main() {\n"
            "    uint64_t bigvar = 0x2000000000000001ul;\n"
            "    if (gl_VertexIndex == 0) {\n"
            "        switch(u_info.whichtest) {\n"
            "            case 0:\n"
            "                debugPrintfEXT(\"Here's an unsigned long 0x%ul\", bigvar);\n"
            "                break;\n"
            "            case 1:\n"
            "                u64vec4 vecul = u64vec4(bigvar, bigvar, bigvar, bigvar);"
            "                debugPrintfEXT(\"Here's a vector of ul %v4ul\", vecul);\n"
            "                break;\n"
            "        }\n"
            "    }\n"
            "    gl_Position = vec4(0.0, 0.0, 0.0, 0.0);\n"
            "}\n";
        VkShaderObj vs_int64(m_device, shader_source_int64, VK_SHADER_STAGE_VERTEX_BIT, this, "main", true);
        VkPipelineObj pipe2(m_device);
        pipe2.AddShader(&vs_int64);
        pipe2.AddDefaultColorAttachment();
        err = pipe2.CreateVKPipeline(pipeline_layout.handle(), renderPass());
        ASSERT_VK_SUCCESS(err);

        // Re-record the command buffer against the int64 pipeline.
        m_commandBuffer->begin(&begin_info);
        m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe2.handle());
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                                  &descriptor_set.set_, 0, nullptr);
        vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
        vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissors);
        vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
        vk::CmdEndRenderPass(m_commandBuffer->handle());
        m_commandBuffer->end();

        // Case 0: scalar 64-bit hex.
        VkDeviceAddress *data = (VkDeviceAddress *)buffer0.memory().map();
        data[0] = 0;
        buffer0.memory().unmap();
        m_errorMonitor->SetDesiredFailureMsg(kInformationBit, "Here's an unsigned long 0x2000000000000001");
        err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
        ASSERT_VK_SUCCESS(err);
        err = vk::QueueWaitIdle(m_device->m_queue);
        ASSERT_VK_SUCCESS(err);
        m_errorMonitor->VerifyFound();
        // Case 1: vector of four 64-bit values.
        data = (VkDeviceAddress *)buffer0.memory().map();
        data[0] = 1;
        buffer0.memory().unmap();
        m_errorMonitor->SetDesiredFailureMsg(
            kInformationBit, "Here's a vector of ul 2000000000000001, 2000000000000001, 2000000000000001, 2000000000000001");
        err = vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
        ASSERT_VK_SUCCESS(err);
        err = vk::QueueWaitIdle(m_device->m_queue);
        ASSERT_VK_SUCCESS(err);
        m_errorMonitor->VerifyFound();
    }
}
// Verifies that debugPrintfEXT output is delivered from both NV task and mesh
// shader stages during a single vkCmdDrawMeshTasksNV.
TEST_F(VkDebugPrintfTest, MeshTaskShadersPrintf) {
    TEST_DESCRIPTION("Test debug printf in mesh and task shaders.");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    InitDebugPrintfFramework();
    // Both the mesh shader extension and non-semantic info (for printf) are required.
    std::vector<const char *> required_device_extensions = {VK_NV_MESH_SHADER_EXTENSION_NAME,
                                                            VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        // Fixed typo ("suppored") and missing separator after the skip prefix,
        // matching the wording used by the other MockICD skips in this file.
        printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Create a device that enables mesh_shader
    auto mesh_shader_features = lvl_init_struct<VkPhysicalDeviceMeshShaderFeaturesNV>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&mesh_shader_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
    // Skip rather than attempt to enable features the device does not expose.
    if (!mesh_shader_features.meshShader || !mesh_shader_features.taskShader) {
        printf("%s Mesh and task shaders not supported, skipping tests\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Task shader: invocation 0 launches one mesh task and prints.
    static const char taskShaderText[] =
        "#version 460\n"
        "#extension GL_NV_mesh_shader : enable\n"
        "#extension GL_EXT_debug_printf : enable\n"
        "layout(local_size_x = 32) in;\n"
        "uint invocationID = gl_LocalInvocationID.x;\n"
        "void main() {\n"
        "    if (invocationID == 0) {\n"
        "        gl_TaskCountNV = 1;\n"
        "        debugPrintfEXT(\"hello from task shader\");\n"
        "    }\n"
        "}\n";

    // Mesh shader: invocation 0 prints; no geometry is emitted.
    static const char meshShaderText[] =
        "#version 450\n"
        "#extension GL_NV_mesh_shader : require\n"
        "#extension GL_EXT_debug_printf : enable\n"
        "layout(local_size_x = 1) in;\n"
        "layout(max_vertices = 3) out;\n"
        "layout(max_primitives = 1) out;\n"
        "layout(triangles) out;\n"
        "uint invocationID = gl_LocalInvocationID.x;\n"
        "void main() {\n"
        "    if (invocationID == 0) {\n"
        "        debugPrintfEXT(\"hello from mesh shader\");\n"
        "    }\n"
        "}\n";

    VkShaderObj ts(m_device, taskShaderText, VK_SHADER_STAGE_TASK_BIT_NV, this);
    VkShaderObj ms(m_device, meshShaderText, VK_SHADER_STAGE_MESH_BIT_NV, this);
    VkPipelineLayoutObj pipeline_layout(m_device);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&ts);
    pipe.AddShader(&ms);
    pipe.AddDefaultColorAttachment();
    VkResult err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
    ASSERT_VK_SUCCESS(err);

    PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV =
        (PFN_vkCmdDrawMeshTasksNV)vk::GetInstanceProcAddr(instance(), "vkCmdDrawMeshTasksNV");
    ASSERT_TRUE(vkCmdDrawMeshTasksNV != nullptr);

    m_commandBuffer->begin();
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    vkCmdDrawMeshTasksNV(m_commandBuffer->handle(), 1, 0);
    m_commandBuffer->end();

    // Expect one printf message from each stage.
    m_errorMonitor->SetDesiredFailureMsg(kInformationBit, "hello from task shader");
    m_errorMonitor->SetDesiredFailureMsg(kInformationBit, "hello from mesh shader");
    m_commandBuffer->QueueCommandBuffer();
    err = vk::QueueWaitIdle(m_device->m_queue);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyFound();
}
1950