1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Nvidia Corporation
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Device Group Tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktDeviceGroupTests.hpp"
26 #include "vktCustomInstancesDevices.hpp"
27
28 #include "vkDefs.hpp"
29 #include "vkDeviceUtil.hpp"
30 #include "vkImageUtil.hpp"
31 #include "vkMemUtil.hpp"
32 #include "vkPlatform.hpp"
33 #include "vkPrograms.hpp"
34 #include "vkQueryUtil.hpp"
35 #include "vkRef.hpp"
36 #include "vkRefUtil.hpp"
37 #include "vkStrUtil.hpp"
38 #include "vkTypeUtil.hpp"
39 #include "vkCmdUtil.hpp"
40 #include "vkObjUtil.hpp"
41 #include "vktTestCase.hpp"
42 #include "vktTestCaseUtil.hpp"
43 #include "vktTestGroupUtil.hpp"
44
45 #include "tcuDefs.hpp"
46 #include "tcuFormatUtil.hpp"
47 #include "tcuImageCompare.hpp"
48 #include "tcuResource.hpp"
49 #include "tcuTestCase.hpp"
50 #include "tcuTestLog.hpp"
51 #include "tcuCommandLine.hpp"
52 #include "tcuTextureUtil.hpp"
53 #include "tcuImageIO.hpp"
54
55 #include "rrRenderer.hpp"
56
57 #include <sstream>
58
59 namespace vkt
60 {
61 namespace DeviceGroup
62 {
63 namespace
64 {
65
66 using namespace vk;
67 using std::string;
68 using std::vector;
69 using tcu::TestLog;
70 using de::UniquePtr;
71
72 //Device group test modes
// Device group test modes. These are bit flags; a test's mode is a mask
// combining one or more of them (stored in DeviceGroupTestInstance::m_testMode).
enum TestModeType
{
	TEST_MODE_SFR			= 1 << 0,	//!< Split frame rendering
	TEST_MODE_AFR			= 1 << 1,	//!< Alternate frame rendering
	TEST_MODE_HOSTMEMORY	= 1 << 2,	//!< Use host memory for rendertarget
	TEST_MODE_DEDICATED		= 1 << 3,	//!< Use dedicated allocations
	TEST_MODE_PEER_FETCH	= 1 << 4,	//!< Peer vertex attributes from peer memory
	TEST_MODE_TESSELLATION	= 1 << 5,	//!< Generate a tessellated sphere instead of triangle
	TEST_MODE_LINEFILL		= 1 << 6,	//!< Draw polygon edges as line segments
};
83
84 class RefVertexShader : public rr::VertexShader
85 {
86 public:
RefVertexShader(void)87 RefVertexShader (void)
88 : rr::VertexShader(1, 0)
89 {
90 m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
91 }
~RefVertexShader(void)92 virtual ~RefVertexShader(void) {}
93
shadeVertices(const rr::VertexAttrib * inputs,rr::VertexPacket * const * packets,const int numPackets) const94 void shadeVertices (const rr::VertexAttrib* inputs, rr::VertexPacket* const* packets, const int numPackets) const
95 {
96 for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
97 {
98 packets[packetNdx]->position = rr::readVertexAttribFloat(inputs[0],
99 packets[packetNdx]->instanceNdx,
100 packets[packetNdx]->vertexNdx);
101 }
102 }
103 };
104
105 class RefFragmentShader : public rr::FragmentShader
106 {
107 public:
RefFragmentShader(void)108 RefFragmentShader (void)
109 : rr::FragmentShader(0, 1)
110 {
111 m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
112 }
113
~RefFragmentShader(void)114 virtual ~RefFragmentShader(void) {}
115
shadeFragments(rr::FragmentPacket *,const int numPackets,const rr::FragmentShadingContext & context) const116 void shadeFragments (rr::FragmentPacket*, const int numPackets, const rr::FragmentShadingContext& context) const
117 {
118 for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
119 {
120 for (int fragNdx = 0; fragNdx < rr::NUM_FRAGMENTS_PER_PACKET; ++fragNdx)
121 {
122 rr::writeFragmentOutput(context, packetNdx, fragNdx, 0, tcu::Vec4(1.0f, 1.0f, 0.0f, 1.0f));
123 }
124 }
125 }
126 };
127
renderReferenceTriangle(const tcu::PixelBufferAccess & dst,const tcu::Vec4 (& vertices)[3],const int subpixelBits)128 void renderReferenceTriangle (const tcu::PixelBufferAccess& dst, const tcu::Vec4(&vertices)[3], const int subpixelBits)
129 {
130 const RefVertexShader vertShader;
131 const RefFragmentShader fragShader;
132 const rr::Program program(&vertShader, &fragShader);
133 const rr::MultisamplePixelBufferAccess colorBuffer = rr::MultisamplePixelBufferAccess::fromSinglesampleAccess(dst);
134 const rr::RenderTarget renderTarget(colorBuffer);
135 const rr::RenderState renderState((rr::ViewportState(colorBuffer)), subpixelBits);
136 const rr::Renderer renderer;
137 const rr::VertexAttrib vertexAttribs[] =
138 {
139 rr::VertexAttrib(rr::VERTEXATTRIBTYPE_FLOAT, 4, sizeof(tcu::Vec4), 0, vertices[0].getPtr())
140 };
141 renderer.draw(rr::DrawCommand(renderState,
142 renderTarget,
143 program,
144 DE_LENGTH_OF_ARRAY(vertexAttribs),
145 &vertexAttribs[0],
146 rr::PrimitiveList(rr::PRIMITIVETYPE_TRIANGLES, DE_LENGTH_OF_ARRAY(vertices), 0)));
147 }
148
// Test instance for device-group rendering. Creates a logical device
// spanning a physical-device group (init()), then iterate() runs the
// per-device rendering/verification loop.
class DeviceGroupTestInstance : public TestInstance
{
public:
	DeviceGroupTestInstance(Context& context, deUint32 mode);
	~DeviceGroupTestInstance(void) {}
private:
	// One-time setup: checks required extensions, picks the device group
	// selected on the command line and creates the group device + queue.
	void init (void);
	// Index of the first memory type allowed by 'memoryTypeBits' that has
	// all bits of 'memoryPropertyFlag'; throws NotSupportedError if none.
	deUint32 getMemoryIndex (deUint32 memoryTypeBits, deUint32 memoryPropertyFlag);
	// True when both devices report GENERIC_SRC peer-memory support for the
	// heap backing 'memoryTypeIndex' (queried in both directions).
	bool isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID);
	// Submits 'cmdBuf' with the given device mask, then waits for device idle.
	void SubmitBufferAndWaitForIdle (const DeviceDriver& vk, VkCommandBuffer cmdBuf, deUint32 deviceMask);
	virtual tcu::TestStatus iterate (void);

	Move<VkDevice> m_deviceGroup;			// logical device created over the whole group
	deUint32 m_physicalDeviceCount;			// number of devices in the selected group
	VkQueue m_deviceGroupQueue;				// universal queue fetched from m_deviceGroup
	vector<VkPhysicalDevice> m_physicalDevices;

	deUint32 m_testMode;					// bitmask of TestModeType flags
	bool m_useHostMemory;					// TEST_MODE_HOSTMEMORY
	bool m_useDedicated;					// TEST_MODE_DEDICATED
	bool m_usePeerFetch;					// TEST_MODE_PEER_FETCH
	bool m_subsetAllocation;				// from VkPhysicalDeviceGroupProperties::subsetAllocation
	bool m_fillModeNonSolid;				// TEST_MODE_LINEFILL
	bool m_drawTessellatedSphere;			// TEST_MODE_TESSELLATION
};
174
// Decodes the test-mode bitmask into per-feature booleans, then performs
// the one-time device-group setup. m_subsetAllocation defaults to true and
// is overwritten in init() from the group properties.
DeviceGroupTestInstance::DeviceGroupTestInstance (Context& context, const deUint32 mode)
	: TestInstance (context)
	, m_physicalDeviceCount (0)
	, m_deviceGroupQueue (DE_NULL)
	, m_testMode (mode)
	, m_useHostMemory (m_testMode & TEST_MODE_HOSTMEMORY)
	, m_useDedicated (m_testMode & TEST_MODE_DEDICATED)
	, m_usePeerFetch (m_testMode & TEST_MODE_PEER_FETCH)
	, m_subsetAllocation (true)
	, m_fillModeNonSolid (m_testMode & TEST_MODE_LINEFILL)
	, m_drawTessellatedSphere (m_testMode & TEST_MODE_TESSELLATION)
{
	init();
}
189
// Returns the index of the first memory type that is permitted by
// 'memoryTypeBits' and carries every bit of 'memoryPropertyFlag';
// throws NotSupportedError when the device offers no such type.
deUint32 DeviceGroupTestInstance::getMemoryIndex (const deUint32 memoryTypeBits, const deUint32 memoryPropertyFlag)
{
	const VkPhysicalDeviceMemoryProperties memProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());

	for (deUint32 typeNdx = 0; typeNdx < memProps.memoryTypeCount; typeNdx++)
	{
		const bool allowedType		= (memoryTypeBits & (1u << typeNdx)) != 0;
		const bool hasAllProperties	= (memProps.memoryTypes[typeNdx].propertyFlags & memoryPropertyFlag) == memoryPropertyFlag;

		if (allowedType && hasAllProperties)
			return typeNdx;
	}

	TCU_THROW(NotSupportedError, "No compatible memory type found");
}
201
isPeerFetchAllowed(deUint32 memoryTypeIndex,deUint32 firstdeviceID,deUint32 seconddeviceID)202 bool DeviceGroupTestInstance::isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID)
203 {
204 VkPeerMemoryFeatureFlags peerMemFeatures1;
205 VkPeerMemoryFeatureFlags peerMemFeatures2;
206 const DeviceDriver vk (m_context.getPlatformInterface(), m_context.getInstance(), *m_deviceGroup);
207 const VkPhysicalDeviceMemoryProperties deviceMemProps1 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[firstdeviceID]);
208 const VkPhysicalDeviceMemoryProperties deviceMemProps2 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[seconddeviceID]);
209 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps2.memoryTypes[memoryTypeIndex].heapIndex, firstdeviceID, seconddeviceID, &peerMemFeatures1);
210 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps1.memoryTypes[memoryTypeIndex].heapIndex, seconddeviceID, firstdeviceID, &peerMemFeatures2);
211 return (peerMemFeatures1 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT) && (peerMemFeatures2 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT);
212 }
213
init(void)214 void DeviceGroupTestInstance::init (void)
215 {
216 if (!m_context.isInstanceFunctionalitySupported("VK_KHR_device_group_creation"))
217 TCU_THROW(NotSupportedError, "Device Group tests are not supported, no device group extension present.");
218
219 const InstanceInterface& instanceInterface = m_context.getInstanceInterface();
220 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
221 const deUint32 queueIndex = 0;
222 const float queuePriority = 1.0f;
223 vector<const char*> extensionPtrs;
224 de::MovePtr<vk::DeviceDriver> deviceDriver;
225 vector<const char*> layerPtrs;
226 vector<string> deviceExtensions;
227 vector<string> enabledLayers;
228
229 if (!m_context.isDeviceFunctionalitySupported("VK_KHR_device_group"))
230 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_device_group");
231
232 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
233 deviceExtensions.push_back("VK_KHR_device_group");
234
235 if(m_useDedicated)
236 {
237 if (!m_context.isDeviceFunctionalitySupported("VK_KHR_dedicated_allocation"))
238 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_dedicated_allocation");
239
240 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_dedicated_allocation"))
241 deviceExtensions.push_back("VK_KHR_dedicated_allocation");
242 }
243
244 {
245 const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
246 const vector<vk::VkPhysicalDeviceGroupProperties> properties = enumeratePhysicalDeviceGroups(instanceInterface, m_context.getInstance());
247 const int kGroupId = cmdLine.getVKDeviceGroupId();
248 const int kGroupIndex = kGroupId - 1;
249 const int kDevId = cmdLine.getVKDeviceId();
250 const int kDevIndex = kDevId - 1;
251
252 if (kGroupId < 1 || static_cast<size_t>(kGroupId) > properties.size())
253 {
254 std::ostringstream msg;
255 msg << "Invalid device group id " << kGroupId << " (only " << properties.size() << " device groups found)";
256 TCU_THROW(NotSupportedError, msg.str());
257 }
258
259 m_physicalDeviceCount = properties[kGroupIndex].physicalDeviceCount;
260 for (deUint32 idx = 0; idx < m_physicalDeviceCount; idx++)
261 {
262 m_physicalDevices.push_back(properties[kGroupIndex].physicalDevices[idx]);
263 }
264
265 if (m_usePeerFetch && m_physicalDeviceCount < 2)
266 TCU_THROW(NotSupportedError, "Peer fetching needs more than 1 physical device.");
267
268 if (!(m_testMode & TEST_MODE_AFR) || (m_physicalDeviceCount > 1))
269 {
270 if (!de::contains(m_context.getDeviceExtensions().begin(), m_context.getDeviceExtensions().end(), std::string("VK_KHR_bind_memory2")))
271 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_bind_memory2");
272 deviceExtensions.push_back("VK_KHR_bind_memory2");
273 }
274
275 const VkDeviceQueueCreateInfo deviceQueueCreateInfo =
276 {
277 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, //type
278 DE_NULL, //pNext
279 (VkDeviceQueueCreateFlags)0u, //flags
280 queueFamilyIndex, //queueFamilyIndex;
281 1u, //queueCount;
282 &queuePriority, //pQueuePriorities;
283 };
284 const VkDeviceGroupDeviceCreateInfo deviceGroupInfo =
285 {
286 VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, //stype
287 DE_NULL, //pNext
288 properties[kGroupIndex].physicalDeviceCount, //physicalDeviceCount
289 properties[kGroupIndex].physicalDevices //physicalDevices
290 };
291
292 if (kDevId < 1 || static_cast<deUint32>(kDevId) > m_physicalDeviceCount)
293 {
294 std::ostringstream msg;
295 msg << "Device id " << kDevId << " invalid for group " << kGroupId << " (group " << kGroupId << " has " << m_physicalDeviceCount << " devices)";
296 TCU_THROW(NotSupportedError, msg.str());
297 }
298
299 VkPhysicalDevice physicalDevice = properties[kGroupIndex].physicalDevices[kDevIndex];
300 VkPhysicalDeviceFeatures enabledDeviceFeatures = getPhysicalDeviceFeatures(instanceInterface, physicalDevice);
301 m_subsetAllocation = properties[kGroupIndex].subsetAllocation;
302
303 if (m_drawTessellatedSphere & static_cast<bool>(!enabledDeviceFeatures.tessellationShader))
304 TCU_THROW(NotSupportedError, "Tessellation is not supported.");
305
306 if (m_fillModeNonSolid & static_cast<bool>(!enabledDeviceFeatures.fillModeNonSolid))
307 TCU_THROW(NotSupportedError, "Line polygon mode is not supported.");
308
309 extensionPtrs.resize(deviceExtensions.size());
310 for (size_t ndx = 0; ndx < deviceExtensions.size(); ++ndx)
311 extensionPtrs[ndx] = deviceExtensions[ndx].c_str();
312
313 const VkDeviceCreateInfo deviceCreateInfo =
314 {
315 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, //sType;
316 &deviceGroupInfo, //pNext;
317 (VkDeviceCreateFlags)0u, //flags
318 1, //queueRecordCount;
319 &deviceQueueCreateInfo, //pRequestedQueues;
320 0u, //layerCount;
321 DE_NULL, //ppEnabledLayerNames;
322 (deUint32)extensionPtrs.size(), //extensionCount;
323 (extensionPtrs.empty() ? DE_NULL : &extensionPtrs[0]), //ppEnabledExtensionNames;
324 &enabledDeviceFeatures, //pEnabledFeatures;
325 };
326 m_deviceGroup = createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(), m_context.getPlatformInterface(), m_context.getInstance(), instanceInterface, physicalDevice, &deviceCreateInfo);
327 }
328
329 deviceDriver = de::MovePtr<vk::DeviceDriver>(new vk::DeviceDriver(m_context.getPlatformInterface(), m_context.getInstance(), *m_deviceGroup));
330 m_deviceGroupQueue = getDeviceQueue(*deviceDriver, *m_deviceGroup, queueFamilyIndex, queueIndex);
331 }
332
// Submits 'cmdBuf' to the device-group queue, passing 'deviceMask' through
// to the submission helper, then waits for the whole logical device to go
// idle before returning.
void DeviceGroupTestInstance::SubmitBufferAndWaitForIdle(const DeviceDriver& vk, VkCommandBuffer cmdBuf, deUint32 deviceMask)
{
	submitCommandsAndWait(vk, *m_deviceGroup, m_deviceGroupQueue, cmdBuf, true, deviceMask);
	VK_CHECK(vk.deviceWaitIdle(*m_deviceGroup));
}
338
iterate(void)339 tcu::TestStatus DeviceGroupTestInstance::iterate (void)
340 {
341 const InstanceInterface& vki (m_context.getInstanceInterface());
342 const DeviceDriver vk (m_context.getPlatformInterface(), m_context.getInstance(), *m_deviceGroup);
343 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
344 const tcu::UVec2 renderSize (256, 256);
345 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
346 const tcu::Vec4 clearColor (0.125f, 0.25f, 0.75f, 1.0f);
347 const tcu::Vec4 drawColor (1.0f, 1.0f, 0.0f, 1.0f);
348 const float tessLevel = 16.0f;
349 SimpleAllocator memAlloc (vk, *m_deviceGroup, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
350 bool iterateResultSuccess = false;
351 const tcu::Vec4 sphereVertices[] =
352 {
353 tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f),
354 tcu::Vec4(0.0f, 1.0f, 0.0f, 1.0f),
355 tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f),
356 tcu::Vec4(0.0f, 0.0f, -1.0f, 1.0f),
357 tcu::Vec4(0.0f, -1.0f, 0.0f, 1.0f),
358 tcu::Vec4(-1.0f, 0.0f, 0.0f, 1.0f),
359 };
360 const deUint32 sphereIndices[] = {0, 1, 2, 2, 1, 3, 3, 1, 5, 5, 1, 0, 0, 2, 4, 2, 3, 4, 3, 5, 4, 5, 0, 4};
361 const tcu::Vec4 triVertices[] =
362 {
363 tcu::Vec4(-0.5f, -0.5f, 0.0f, 1.0f),
364 tcu::Vec4(+0.5f, -0.5f, 0.0f, 1.0f),
365 tcu::Vec4(0.0f, +0.5f, 0.0f, 1.0f)
366 };
367 const deUint32 triIndices[] = {0, 1, 2};
368 const tcu::Vec4 * vertices = m_drawTessellatedSphere ? &sphereVertices[0] : &triVertices[0];
369 const deUint32 * indices = m_drawTessellatedSphere ? &sphereIndices[0] : &triIndices[0];
370 const deUint32 verticesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereVertices)) : deUint32(sizeof(triVertices));
371 const deUint32 numIndices = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)/sizeof(sphereIndices[0])) : deUint32(sizeof(triIndices)/sizeof(triIndices[0]));
372 const deUint32 indicesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)) : deUint32(sizeof(triIndices));
373
374 // Loop through all physical devices in the device group
375 for (deUint32 physDevID = 0; physDevID < m_physicalDeviceCount; physDevID++)
376 {
377 const deUint32 firstDeviceID = physDevID;
378 const deUint32 secondDeviceID = (firstDeviceID + 1 ) % m_physicalDeviceCount;
379 vector<deUint32> deviceIndices (m_physicalDeviceCount);
380 bool isPeerMemAsCopySrcAllowed = true;
381 // Set broadcast on memory allocation
382 const deUint32 allocDeviceMask = m_subsetAllocation ? (1 << firstDeviceID) | (1 << secondDeviceID) : (1 << m_physicalDeviceCount) - 1;
383
384 for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
385 deviceIndices[i] = i;
386 deviceIndices[firstDeviceID] = secondDeviceID;
387 deviceIndices[secondDeviceID] = firstDeviceID;
388
389 VkMemoryRequirements memReqs =
390 {
391 0, // VkDeviceSize size
392 0, // VkDeviceSize alignment
393 0, // uint32_t memoryTypeBits
394 };
395 deUint32 memoryTypeNdx = 0;
396 de::MovePtr<Allocation> stagingVertexBufferMemory;
397 de::MovePtr<Allocation> stagingIndexBufferMemory;
398 de::MovePtr<Allocation> stagingUniformBufferMemory;
399 de::MovePtr<Allocation> stagingSboBufferMemory;
400
401 vk::Move<vk::VkDeviceMemory> vertexBufferMemory;
402 vk::Move<vk::VkDeviceMemory> indexBufferMemory;
403 vk::Move<vk::VkDeviceMemory> uniformBufferMemory;
404 vk::Move<vk::VkDeviceMemory> sboBufferMemory;
405 vk::Move<vk::VkDeviceMemory> imageMemory;
406
407 Move<VkRenderPass> renderPass;
408 Move<VkImage> renderImage;
409 Move<VkImage> readImage;
410
411 Move<VkDescriptorSetLayout> descriptorSetLayout;
412 Move<VkDescriptorPool> descriptorPool;
413 Move<VkDescriptorSet> descriptorSet;
414
415 Move<VkBuffer> stagingVertexBuffer;
416 Move<VkBuffer> stagingUniformBuffer;
417 Move<VkBuffer> stagingIndexBuffer;
418 Move<VkBuffer> stagingSboBuffer;
419
420 Move<VkBuffer> vertexBuffer;
421 Move<VkBuffer> indexBuffer;
422 Move<VkBuffer> uniformBuffer;
423 Move<VkBuffer> sboBuffer;
424
425 Move<VkPipeline> pipeline;
426 Move<VkPipelineLayout> pipelineLayout;
427
428 Move<VkImageView> colorAttView;
429 Move<VkFramebuffer> framebuffer;
430 Move<VkCommandPool> cmdPool;
431 Move<VkCommandBuffer> cmdBuffer;
432
433 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo =
434 {
435 VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, // sType
436 DE_NULL, // pNext
437 DE_NULL, // image
438 DE_NULL // buffer
439 };
440
441 VkMemoryAllocateFlagsInfo allocDeviceMaskInfo =
442 {
443 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, // sType
444 m_useDedicated ? &dedicatedAllocInfo : DE_NULL, // pNext
445 VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, // flags
446 allocDeviceMask, // deviceMask
447 };
448
449 VkMemoryAllocateInfo allocInfo =
450 {
451 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
452 &allocDeviceMaskInfo, // pNext
453 0u, // allocationSize
454 0u, // memoryTypeIndex
455 };
456
457 // create vertex buffers
458 {
459 const VkBufferCreateInfo stagingVertexBufferParams =
460 {
461 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
462 DE_NULL, // pNext
463 0u, // flags
464 (VkDeviceSize)verticesSize, // size
465 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
466 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
467 1u, // queueFamilyIndexCount
468 &queueFamilyIndex, // pQueueFamilyIndices
469 };
470 stagingVertexBuffer = createBuffer(vk, *m_deviceGroup, &stagingVertexBufferParams);
471 stagingVertexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingVertexBuffer), MemoryRequirement::HostVisible);
472 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingVertexBuffer, stagingVertexBufferMemory->getMemory(), stagingVertexBufferMemory->getOffset()));
473
474 void* vertexBufPtr = stagingVertexBufferMemory->getHostPtr();
475 deMemcpy(vertexBufPtr, &vertices[0], verticesSize);
476 flushAlloc(vk, *m_deviceGroup, *stagingVertexBufferMemory);
477 }
478
479 {
480 const VkBufferCreateInfo vertexBufferParams =
481 {
482 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
483 DE_NULL, // pNext
484 0u, // flags
485 (VkDeviceSize)verticesSize, // size
486 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
487 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
488 1u, // queueFamilyIndexCount
489 &queueFamilyIndex, // pQueueFamilyIndices
490 };
491 vertexBuffer = createBuffer(vk, *m_deviceGroup, &vertexBufferParams);
492
493 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, vertexBuffer.get());
494 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
495
496 dedicatedAllocInfo.buffer = vertexBuffer.get();
497 allocInfo.allocationSize = memReqs.size;
498 allocInfo.memoryTypeIndex = memoryTypeNdx;
499 vertexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
500
501 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
502 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
503
504 // Bind vertex buffer
505 if (m_usePeerFetch)
506 {
507 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
508 {
509 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
510 DE_NULL, // pNext
511 m_physicalDeviceCount, // deviceIndexCount
512 &deviceIndices[0], // pDeviceIndices
513 };
514
515 VkBindBufferMemoryInfo bindInfo =
516 {
517 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
518 &devGroupBindInfo, // pNext
519 vertexBuffer.get(), // buffer
520 vertexBufferMemory.get(), // memory
521 0u, // memoryOffset
522 };
523 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
524 }
525 else
526 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *vertexBuffer, vertexBufferMemory.get(), 0));
527 }
528
529 // create index buffers
530 {
531 const VkBufferCreateInfo stagingIndexBufferParams =
532 {
533 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
534 DE_NULL, // pNext
535 0u, // flags
536 (VkDeviceSize)indicesSize, // size
537 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
538 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
539 1u, // queueFamilyIndexCount
540 &queueFamilyIndex, // pQueueFamilyIndices
541 };
542 stagingIndexBuffer = createBuffer(vk, *m_deviceGroup, &stagingIndexBufferParams);
543 stagingIndexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingIndexBuffer), MemoryRequirement::HostVisible);
544 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingIndexBuffer, stagingIndexBufferMemory->getMemory(), stagingIndexBufferMemory->getOffset()));
545
546 void* indexBufPtr = stagingIndexBufferMemory->getHostPtr();
547 deMemcpy(indexBufPtr, &indices[0], indicesSize);
548 flushAlloc(vk, *m_deviceGroup, *stagingIndexBufferMemory);
549 }
550
551 {
552 const VkBufferCreateInfo indexBufferParams =
553 {
554 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
555 DE_NULL, // pNext
556 0u, // flags
557 (VkDeviceSize)indicesSize, // size
558 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
559 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
560 1u, // queueFamilyIndexCount
561 &queueFamilyIndex, // pQueueFamilyIndices
562 };
563 indexBuffer = createBuffer(vk, *m_deviceGroup, &indexBufferParams);
564
565 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, indexBuffer.get());
566 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
567
568 dedicatedAllocInfo.buffer = indexBuffer.get();
569 allocInfo.allocationSize = memReqs.size;
570 allocInfo.memoryTypeIndex = memoryTypeNdx;
571 indexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
572
573 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
574 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
575
576 // Bind index buffer
577 if (m_usePeerFetch)
578 {
579 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
580 {
581 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
582 DE_NULL, // pNext
583 m_physicalDeviceCount, // deviceIndexCount
584 &deviceIndices[0], // pDeviceIndices
585 };
586
587 VkBindBufferMemoryInfo bindInfo =
588 {
589 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
590 &devGroupBindInfo, // pNext
591 indexBuffer.get(), // buffer
592 indexBufferMemory.get(), // memory
593 0u, // memoryOffset
594 };
595 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
596 }
597 else
598 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *indexBuffer, indexBufferMemory.get(), 0));
599 }
600
601 // create uniform buffers
602 {
603 const VkBufferCreateInfo stagingUniformBufferParams =
604 {
605 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
606 DE_NULL, // pNext
607 0u, // flags
608 (VkDeviceSize)sizeof(drawColor), // size
609 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
610 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
611 1u, // queueFamilyIndexCount
612 &queueFamilyIndex, // pQueueFamilyIndices
613 };
614 stagingUniformBuffer = createBuffer(vk, *m_deviceGroup, &stagingUniformBufferParams);
615 stagingUniformBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingUniformBuffer), MemoryRequirement::HostVisible);
616 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingUniformBuffer, stagingUniformBufferMemory->getMemory(), stagingUniformBufferMemory->getOffset()));
617
618 void* uniformBufPtr = stagingUniformBufferMemory->getHostPtr();
619 deMemcpy(uniformBufPtr, &drawColor[0], sizeof(drawColor));
620 flushAlloc(vk, *m_deviceGroup, *stagingUniformBufferMemory);
621 }
622
623 {
624 const VkBufferCreateInfo uniformBufferParams =
625 {
626 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
627 DE_NULL, // pNext
628 0u, // flags
629 (VkDeviceSize)sizeof(drawColor), // size
630 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
631 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
632 1u, // queueFamilyIndexCount
633 &queueFamilyIndex, // pQueueFamilyIndices
634 };
635 uniformBuffer = createBuffer(vk, *m_deviceGroup, &uniformBufferParams);
636
637 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, uniformBuffer.get());
638 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
639
640 dedicatedAllocInfo.buffer = uniformBuffer.get();
641 allocInfo.allocationSize = memReqs.size;
642 allocInfo.memoryTypeIndex = memoryTypeNdx;
643 uniformBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
644
645 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
646 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
647
648 if (m_usePeerFetch)
649 {
650 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
651 {
652 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
653 DE_NULL, // pNext
654 m_physicalDeviceCount, // deviceIndexCount
655 &deviceIndices[0], // pDeviceIndices
656 };
657
658 VkBindBufferMemoryInfo bindInfo =
659 {
660 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
661 &devGroupBindInfo, // pNext
662 uniformBuffer.get(), // buffer
663 uniformBufferMemory.get(), // memory
664 0u, // memoryOffset
665 };
666 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
667 }
668 else
669 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, uniformBuffer.get(), uniformBufferMemory.get(), 0));
670 }
671
672 // create SBO buffers
673 {
674 const VkBufferCreateInfo stagingSboBufferParams =
675 {
676 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
677 DE_NULL, // pNext
678 0u, // flags
679 (VkDeviceSize)sizeof(tessLevel), // size
680 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
681 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
682 1u, // queueFamilyIndexCount
683 &queueFamilyIndex, // pQueueFamilyIndices
684 };
685 stagingSboBuffer = createBuffer(vk, *m_deviceGroup, &stagingSboBufferParams);
686 stagingSboBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingSboBuffer), MemoryRequirement::HostVisible);
687 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingSboBuffer, stagingSboBufferMemory->getMemory(), stagingSboBufferMemory->getOffset()));
688
689 void* sboBufPtr = stagingSboBufferMemory->getHostPtr();
690 deMemcpy(sboBufPtr, &tessLevel, sizeof(tessLevel));
691 flushAlloc(vk, *m_deviceGroup, *stagingSboBufferMemory);
692 }
693
694 {
695 const VkBufferCreateInfo sboBufferParams =
696 {
697 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
698 DE_NULL, // pNext
699 0u, // flags
700 (VkDeviceSize)sizeof(tessLevel), // size
701 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
702 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
703 1u, // queueFamilyIndexCount
704 &queueFamilyIndex, // pQueueFamilyIndices
705 };
706 sboBuffer = createBuffer(vk, *m_deviceGroup, &sboBufferParams);
707
708 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, sboBuffer.get());
709 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
710
711 dedicatedAllocInfo.buffer = sboBuffer.get();
712 allocInfo.allocationSize = memReqs.size;
713 allocInfo.memoryTypeIndex = memoryTypeNdx;
714 sboBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
715
716 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
717 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
718
719 if (m_usePeerFetch)
720 {
721 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
722 {
723 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
724 DE_NULL, // pNext
725 m_physicalDeviceCount, // deviceIndexCount
726 &deviceIndices[0], // pDeviceIndices
727 };
728
729 VkBindBufferMemoryInfo bindInfo =
730 {
731 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
732 &devGroupBindInfo, // pNext
733 sboBuffer.get(), // buffer
734 sboBufferMemory.get(), // memory
735 0u, // memoryOffset
736 };
737 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
738 }
739 else
740 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, sboBuffer.get(), sboBufferMemory.get(), 0));
741 }
742
	// Create image resources
	// Use a consistent usage flag because of memory aliasing:
	// renderImage, readImage (and later peerImage) are all bound to the same
	// VkDeviceMemory, so they must be created with identical parameters.
	VkImageUsageFlags imageUsageFlag = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
	{
		// Check for SFR support
		// NOTE(review): this support query passes only a subset of imageUsageFlag
		// (no TRANSFER_DST) — confirm that is intentional for the SFR capability check.
		VkImageFormatProperties properties;
		if ((m_testMode & TEST_MODE_SFR) && vki.getPhysicalDeviceImageFormatProperties(m_context.getPhysicalDevice(),
			colorFormat,															// format
			VK_IMAGE_TYPE_2D,														// type
			VK_IMAGE_TILING_OPTIMAL,												// tiling
			VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,	// usage
			VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT,						// flags
			&properties) != VK_SUCCESS)												// properties
		{
			TCU_THROW(NotSupportedError, "Format not supported for SFR");
		}

		VkImageCreateFlags	imageCreateFlags = VK_IMAGE_CREATE_ALIAS_BIT;	// The image objects alias same memory
		if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
		{
			// SFR rendering requires per-instance bind regions on the image.
			imageCreateFlags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT;
		}

		const VkImageCreateInfo		imageParams =
		{
			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// sType
			DE_NULL,								// pNext
			imageCreateFlags,						// flags
			VK_IMAGE_TYPE_2D,						// imageType
			colorFormat,							// format
			{ renderSize.x(), renderSize.y(), 1 },	// extent
			1u,										// mipLevels
			1u,										// arraySize
			VK_SAMPLE_COUNT_1_BIT,					// samples
			VK_IMAGE_TILING_OPTIMAL,				// tiling
			imageUsageFlag,							// usage
			VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
			1u,										// queueFamilyIndexCount
			&queueFamilyIndex,						// pQueueFamilyIndices
			VK_IMAGE_LAYOUT_UNDEFINED,				// initialLayout
		};

		// Two images with identical create info; they will alias one allocation.
		renderImage = createImage(vk, *m_deviceGroup, &imageParams);
		readImage = createImage(vk, *m_deviceGroup, &imageParams);

		// Allocate once, sized/typed from renderImage's requirements; readImage
		// aliases the same memory (legal because of VK_IMAGE_CREATE_ALIAS_BIT and
		// identical create parameters).
		dedicatedAllocInfo.image = *renderImage;
		dedicatedAllocInfo.buffer = DE_NULL;
		memReqs = getImageMemoryRequirements(vk, *m_deviceGroup, renderImage.get());
		memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, m_useHostMemory ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
		allocInfo.allocationSize = memReqs.size;
		allocInfo.memoryTypeIndex = memoryTypeNdx;
		imageMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
	}

	// Bind both aliasing images to the same memory at offset 0.
	VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *renderImage, imageMemory.get(), 0));
	VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *readImage, imageMemory.get(), 0));
799
	// Create renderpass: a single subpass with one color attachment.
	// The attachment is cleared on load and kept in COLOR_ATTACHMENT_OPTIMAL
	// throughout (an explicit barrier before the pass performs the initial
	// UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL transition).
	{
		const VkAttachmentDescription			colorAttachmentDescription	=
		{
			(VkAttachmentDescriptionFlags)0,			// VkAttachmentDescriptionFlags    flags
			colorFormat,								// VkFormat                        format
			VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits           samples
			VK_ATTACHMENT_LOAD_OP_CLEAR,				// VkAttachmentLoadOp              loadOp
			VK_ATTACHMENT_STORE_OP_STORE,				// VkAttachmentStoreOp             storeOp
			VK_ATTACHMENT_LOAD_OP_DONT_CARE,			// VkAttachmentLoadOp              stencilLoadOp
			VK_ATTACHMENT_STORE_OP_DONT_CARE,			// VkAttachmentStoreOp             stencilStoreOp
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout                   initialLayout
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout                   finalLayout
		};

		const VkAttachmentReference				colorAttachmentRef			=
		{
			0u,											// deUint32         attachment
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout    layout
		};

		const VkSubpassDescription				subpassDescription			=
		{
			(VkSubpassDescriptionFlags)0,		// VkSubpassDescriptionFlags       flags
			VK_PIPELINE_BIND_POINT_GRAPHICS,	// VkPipelineBindPoint             pipelineBindPoint
			0u,									// deUint32                        inputAttachmentCount
			DE_NULL,							// const VkAttachmentReference*    pInputAttachments
			1u,									// deUint32                        colorAttachmentCount
			&colorAttachmentRef,				// const VkAttachmentReference*    pColorAttachments
			DE_NULL,							// const VkAttachmentReference*    pResolveAttachments
			DE_NULL,							// const VkAttachmentReference*    pDepthStencilAttachment
			0u,									// deUint32                        preserveAttachmentCount
			DE_NULL								// const deUint32*                 pPreserveAttachments
		};

		const VkRenderPassCreateInfo			renderPassInfo				=
		{
			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,	// VkStructureType                   sType
			DE_NULL,									// const void*                       pNext
			(VkRenderPassCreateFlags)0,					// VkRenderPassCreateFlags           flags
			1,											// deUint32                          attachmentCount
			&colorAttachmentDescription,				// const VkAttachmentDescription*    pAttachments
			1u,											// deUint32                          subpassCount
			&subpassDescription,						// const VkSubpassDescription*       pSubpasses
			0u,											// deUint32                          dependencyCount
			DE_NULL										// const VkSubpassDependency*        pDependencies
		};

		renderPass = createRenderPass(vk, *m_deviceGroup, &renderPassInfo, DE_NULL);
	}
850
851 // Create descriptors
852 {
853 vector<VkDescriptorSetLayoutBinding> layoutBindings;
854 vector<VkDescriptorPoolSize> descriptorTypes;
855 vector<VkWriteDescriptorSet> writeDescritporSets;
856
857 const VkDescriptorSetLayoutBinding layoutBindingUBO =
858 {
859 0u, // deUint32 binding;
860 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType descriptorType;
861 1u, // deUint32 descriptorCount;
862 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlags stageFlags;
863 DE_NULL // const VkSampler* pImmutableSamplers;
864 };
865 const VkDescriptorSetLayoutBinding layoutBindingSBO =
866 {
867 1u, // deUint32 binding;
868 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType descriptorType;
869 1u, // deUint32 descriptorCount;
870 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, // VkShaderStageFlags stageFlags;
871 DE_NULL // const VkSampler* pImmutableSamplers;
872 };
873
874 layoutBindings.push_back(layoutBindingUBO);
875 if (m_drawTessellatedSphere)
876 layoutBindings.push_back(layoutBindingSBO);
877
878 const VkDescriptorSetLayoutCreateInfo descriptorLayoutParams =
879 {
880 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
881 DE_NULL, // cost void* pNext;
882 (VkDescriptorSetLayoutCreateFlags)0, // VkDescriptorSetLayoutCreateFlags flags
883 deUint32(layoutBindings.size()), // deUint32 count;
884 layoutBindings.data() // const VkDescriptorSetLayoutBinding pBinding;
885 };
886 descriptorSetLayout = createDescriptorSetLayout(vk, *m_deviceGroup, &descriptorLayoutParams);
887
888 const VkDescriptorPoolSize descriptorTypeUBO =
889 {
890 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType type;
891 1 // deUint32 count;
892 };
893 const VkDescriptorPoolSize descriptorTypeSBO =
894 {
895 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType type;
896 1 // deUint32 count;
897 };
898 descriptorTypes.push_back(descriptorTypeUBO);
899 if (m_drawTessellatedSphere)
900 descriptorTypes.push_back(descriptorTypeSBO);
901
902 const VkDescriptorPoolCreateInfo descriptorPoolParams =
903 {
904 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, // VkStructureType sType;
905 DE_NULL, // void* pNext;
906 VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, // VkDescriptorPoolCreateFlags flags;
907 1u, // deUint32 maxSets;
908 deUint32(descriptorTypes.size()), // deUint32 count;
909 descriptorTypes.data() // const VkDescriptorTypeCount* pTypeCount
910 };
911 descriptorPool = createDescriptorPool(vk, *m_deviceGroup, &descriptorPoolParams);
912
913 const VkDescriptorSetAllocateInfo descriptorSetParams =
914 {
915 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
916 DE_NULL,
917 *descriptorPool,
918 1u,
919 &descriptorSetLayout.get(),
920 };
921 descriptorSet = allocateDescriptorSet(vk, *m_deviceGroup, &descriptorSetParams);
922
923 const VkDescriptorBufferInfo uboDescriptorInfo =
924 {
925 uniformBuffer.get(),
926 0,
927 (VkDeviceSize)sizeof(drawColor)
928 };
929 const VkDescriptorBufferInfo sboDescriptorInfo =
930 {
931 sboBuffer.get(),
932 0,
933 (VkDeviceSize)sizeof(tessLevel)
934 };
935 const VkWriteDescriptorSet writeDescritporSetUBO =
936 {
937 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
938 DE_NULL, // const void* pNext;
939 *descriptorSet, // VkDescriptorSet destSet;
940 0, // deUint32 destBinding;
941 0, // deUint32 destArrayElement;
942 1u, // deUint32 count;
943 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType descriptorType;
944 (const VkDescriptorImageInfo*)DE_NULL, // VkDescriptorImageInfo* pImageInfo;
945 &uboDescriptorInfo, // VkDescriptorBufferInfo* pBufferInfo;
946 (const VkBufferView*)DE_NULL // VkBufferView* pTexelBufferView;
947 };
948
949 const VkWriteDescriptorSet writeDescritporSetSBO =
950 {
951 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
952 DE_NULL, // const void* pNext;
953 *descriptorSet, // VkDescriptorSet destSet;
954 1, // deUint32 destBinding;
955 0, // deUint32 destArrayElement;
956 1u, // deUint32 count;
957 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType descriptorType;
958 (const VkDescriptorImageInfo*)DE_NULL, // VkDescriptorImageInfo* pImageInfo;
959 &sboDescriptorInfo, // VkDescriptorBufferInfo* pBufferInfo;
960 (const VkBufferView*)DE_NULL // VkBufferView* pTexelBufferView;
961 };
962 writeDescritporSets.push_back(writeDescritporSetUBO);
963 if (m_drawTessellatedSphere)
964 writeDescritporSets.push_back(writeDescritporSetSBO);
965
966 vk.updateDescriptorSets(*m_deviceGroup, deUint32(writeDescritporSets.size()), writeDescritporSets.data(), 0u, DE_NULL);
967 }
968
	// Create Pipeline
	// Builds a graphics pipeline with the test's shader combo: vert+frag always,
	// plus tess-control/tess-eval when drawing the tessellated sphere. Wireframe
	// (line) fill mode is selected by m_fillModeNonSolid.
	{
		Move<VkShaderModule>							vertShaderModule;
		Move<VkShaderModule>							tcssShaderModule;
		Move<VkShaderModule>							tessShaderModule;
		Move<VkShaderModule>							fragShaderModule;

		const VkDescriptorSetLayout descset = descriptorSetLayout.get();
		const VkPipelineLayoutCreateInfo	pipelineLayoutParams =
		{
			VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// sType
			DE_NULL,										// pNext
			(vk::VkPipelineLayoutCreateFlags)0,				// flags
			1u,												// setLayoutCount
			&descset,										// pSetLayouts
			0u,												// pushConstantRangeCount
			DE_NULL,										// pPushConstantRanges
		};
		pipelineLayout = createPipelineLayout(vk, *m_deviceGroup, &pipelineLayoutParams);

		// Shaders
		vertShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("vert"), 0);
		fragShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("frag"), 0);

		if (m_drawTessellatedSphere)
		{
			tcssShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tesc"), 0);
			tessShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tese"), 0);
		}

		// Static viewport/scissor covering the whole render target.
		const std::vector<VkViewport>	viewports	(1, makeViewport(renderSize));
		const std::vector<VkRect2D>		scissors	(1, makeRect2D(renderSize));

		const VkPipelineRasterizationStateCreateInfo	rasterParams =
		{
			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,			// sType
			DE_NULL,															// pNext
			0u,																	// flags
			VK_FALSE,															// depthClampEnable
			VK_FALSE,															// rasterizerDiscardEnable
			m_fillModeNonSolid ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL,	// polygonMode
			VK_CULL_MODE_NONE,													// cullMode
			VK_FRONT_FACE_COUNTER_CLOCKWISE,									// frontFace
			VK_FALSE,															// depthBiasEnable
			0.0f,																// depthBiasConstantFactor
			0.0f,																// depthBiasClamp
			0.0f,																// depthBiasSlopeFactor
			1.0f,																// lineWidth
		};

		// Patch-list topology is required when tessellation stages are active.
		const VkPrimitiveTopology topology = m_drawTessellatedSphere ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

		pipeline = makeGraphicsPipeline(vk,														// const DeviceInterface&                       vk
										*m_deviceGroup,											// const VkDevice                               device
										*pipelineLayout,										// const VkPipelineLayout                       pipelineLayout
										*vertShaderModule,										// const VkShaderModule                         vertexShaderModule
										m_drawTessellatedSphere ? *tcssShaderModule : DE_NULL,	// const VkShaderModule                         tessellationControlModule,
										m_drawTessellatedSphere ? *tessShaderModule : DE_NULL,	// const VkShaderModule                         tessellationEvalModule,
										DE_NULL,												// const VkShaderModule                         geometryShaderModule
										*fragShaderModule,										// const VkShaderModule                         fragmentShaderModule
										*renderPass,											// const VkRenderPass                           renderPass
										viewports,												// const std::vector<VkViewport>&               viewports
										scissors,												// const std::vector<VkRect2D>&                 scissors
										topology,												// const VkPrimitiveTopology                    topology
										0u,														// const deUint32                               subpass
										3u,														// const deUint32                               patchControlPoints
										DE_NULL,												// const VkPipelineVertexInputStateCreateInfo*  vertexInputStateCreateInfo
										&rasterParams);											// const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo
	}
1038
	// Create Framebuffer
	// Single-layer 2D color view of renderImage, attached to the renderpass above.
	{
		const VkImageViewCreateInfo colorAttViewParams =
		{
			VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// sType
			DE_NULL,									// pNext
			0u,											// flags
			*renderImage,								// image
			VK_IMAGE_VIEW_TYPE_2D,						// viewType
			colorFormat,								// format
			{
				VK_COMPONENT_SWIZZLE_R,
				VK_COMPONENT_SWIZZLE_G,
				VK_COMPONENT_SWIZZLE_B,
				VK_COMPONENT_SWIZZLE_A
			},											// components
			{
				VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,										// baseMipLevel
				1u,										// levelCount
				0u,										// baseArrayLayer
				1u,										// layerCount
			},											// subresourceRange
		};
		colorAttView = createImageView(vk, *m_deviceGroup, &colorAttViewParams);

		const VkFramebufferCreateInfo framebufferParams =
		{
			VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,	// sType
			DE_NULL,									// pNext
			0u,											// flags
			*renderPass,								// renderPass
			1u,											// attachmentCount
			&*colorAttView,								// pAttachments
			renderSize.x(),								// width
			renderSize.y(),								// height
			1u,											// layers
		};
		framebuffer = createFramebuffer(vk, *m_deviceGroup, &framebufferParams);
	}
1079
	// Create Command buffer
	// One resettable primary command buffer on the test's queue family; it is
	// re-recorded (begin/end) for every submission below.
	{
		const VkCommandPoolCreateInfo cmdPoolParams =
		{
			VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,			// sType
			DE_NULL,											// pNext
			VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,	// flags
			queueFamilyIndex,									// queueFamilyIndex
		};
		cmdPool = createCommandPool(vk, *m_deviceGroup, &cmdPoolParams);

		const VkCommandBufferAllocateInfo cmdBufParams =
		{
			VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,	// sType
			DE_NULL,										// pNext
			*cmdPool,										// pool
			VK_COMMAND_BUFFER_LEVEL_PRIMARY,				// level
			1u,												// bufferCount
		};
		cmdBuffer = allocateCommandBuffer(vk, *m_deviceGroup, &cmdBufParams);
	}
1101
	// Do a layout transition for renderImage
	// Moves renderImage from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL (the layout
	// the renderpass declares as initialLayout), submitted on both devices.
	{
		beginCommandBuffer(vk, *cmdBuffer);
		const VkImageMemoryBarrier colorAttBarrier =
		{
			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
			DE_NULL,									// pNext
			0u,											// srcAccessMask
			(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT),		// dstAccessMask
			VK_IMAGE_LAYOUT_UNDEFINED,					// oldLayout
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// newLayout
			queueFamilyIndex,							// srcQueueFamilyIndex
			queueFamilyIndex,							// dstQueueFamilyIndex
			*renderImage,								// image
			{
				VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,										// baseMipLevel
				1u,										// levelCount
				0u,										// baseArrayLayer
				1u,										// layerCount
			}											// subresourceRange
		};
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &colorAttBarrier);

		endCommandBuffer(vk, *cmdBuffer);
		// Execute the transition on both participating physical devices.
		const deUint32 deviceMask = (1 << firstDeviceID) | (1 << secondDeviceID);
		SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
	}
1131
	// Bind renderImage across devices for SFR
	// Re-binds renderImage with split-instance bind regions: an NxN rect table
	// (resourceInstance x memoryInstance) where each device owns one vertical
	// half of the image and peers into the other half.
	if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
	{
		if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
			TCU_THROW(NotSupportedError, "Peer texture reads is not supported.");

		// Check if peer memory can be used as source of a copy command in case of SFR bindings, always allowed in case of 1 device
		VkPeerMemoryFeatureFlags				peerMemFeatures;
		const VkPhysicalDeviceMemoryProperties	deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[secondDeviceID]);
		vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps.memoryTypes[memoryTypeNdx].heapIndex, firstDeviceID, secondDeviceID, &peerMemFeatures);
		isPeerMemAsCopySrcAllowed = (peerMemFeatures & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT);

		VkRect2D zeroRect = {
			{
				0,	// VkOffset2D.x
				0,	// VkOffset2D.y
			},
			{
				0,	// VkExtent2D.width
				0,	// VkExtent2D.height
			}
		};
		// One rect per (deviceIndex, instanceIndex) pair; start all-zero.
		vector<VkRect2D> sfrRects;
		for (deUint32 i = 0; i < m_physicalDeviceCount*m_physicalDeviceCount; i++)
			sfrRects.push_back(zeroRect);

		if (m_physicalDeviceCount == 1u)
		{
			// Single device owns the full image.
			sfrRects[0].extent.width	= (deInt32)renderSize.x();
			sfrRects[0].extent.height	= (deInt32)renderSize.y();
		}
		else
		{
			// Split into 2 vertical halves: first device owns the left half,
			// second device the right half; the cross entries mirror these so
			// each device can address the other's half.
			sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.width	= (deInt32)renderSize.x() / 2;
			sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.height	= (deInt32)renderSize.y();
			sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID]				= sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
			sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID].offset.x		= (deInt32)renderSize.x() / 2;
			sfrRects[secondDeviceID * m_physicalDeviceCount + firstDeviceID]				= sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
			sfrRects[secondDeviceID * m_physicalDeviceCount + secondDeviceID]				= sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID];
		}

		VkBindImageMemoryDeviceGroupInfo	devGroupBindInfo =
		{
			VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,	// sType
			DE_NULL,												// pNext
			0u,														// deviceIndexCount (0 => use split-instance regions instead)
			DE_NULL,												// pDeviceIndices
			m_physicalDeviceCount*m_physicalDeviceCount,			// splitInstanceBindRegionCount
			&sfrRects[0],											// pSplitInstanceBindRegions
		};

		VkBindImageMemoryInfo				bindInfo =
		{
			VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,	// sType
			&devGroupBindInfo,							// pNext
			*renderImage,								// image
			imageMemory.get(),							// memory
			0u,											// memoryOffset
		};
		VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
	}
1194
	// Begin recording
	beginCommandBuffer(vk, *cmdBuffer);

	// Update buffers
	// Host-written staging buffers are copied to the device-local buffers;
	// each copy is bracketed by HOST->TRANSFER and TRANSFER->consumer barriers.
	{
		// Make the host writes to the staging vertex buffer visible to the copy.
		const VkBufferMemoryBarrier stagingVertexBufferUpdateBarrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
			DE_NULL,									// const void*        pNext;
			VK_ACCESS_HOST_WRITE_BIT,					// VkAccessFlags      srcAccessMask;
			VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags      dstAccessMask;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
			stagingVertexBuffer.get(),					// VkBuffer           buffer;
			0u,											// VkDeviceSize       offset;
			verticesSize								// VkDeviceSize       size;
		};

		// Make the copied vertex data visible to vertex fetch.
		const VkBufferMemoryBarrier vertexBufferUpdateBarrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
			DE_NULL,									// const void*        pNext;
			VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      srcAccessMask;
			VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,		// VkAccessFlags      dstAccessMask;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
			vertexBuffer.get(),							// VkBuffer           buffer;
			0u,											// VkDeviceSize       offset;
			verticesSize								// VkDeviceSize       size;
		};

		// Make the host writes to the staging index buffer visible to the copy.
		const VkBufferMemoryBarrier stagingIndexBufferUpdateBarrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
			DE_NULL,									// const void*        pNext;
			VK_ACCESS_HOST_WRITE_BIT,					// VkAccessFlags      srcAccessMask;
			VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags      dstAccessMask;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
			stagingIndexBuffer.get(),					// VkBuffer           buffer;
			0u,											// VkDeviceSize       offset;
			indicesSize									// VkDeviceSize       size;
		};

		// Make the copied index data visible to index fetch.
		const VkBufferMemoryBarrier indexBufferUpdateBarrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
			DE_NULL,									// const void*        pNext;
			VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      srcAccessMask;
			VK_ACCESS_INDEX_READ_BIT,					// VkAccessFlags      dstAccessMask;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
			indexBuffer.get(),							// VkBuffer           buffer;
			0u,											// VkDeviceSize       offset;
			indicesSize									// VkDeviceSize       size;
		};
1252 const VkBufferMemoryBarrier stagingUboBufferUpdateBarrier =
1253 {
1254 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1255 DE_NULL, // const void* pNext;
1256 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1257 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1258 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1259 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1260 stagingUniformBuffer.get(), // VkBuffer buffer;
1261 0u, // VkDeviceSize offset;
1262 indicesSize // VkDeviceSize size;
1263 };
1264
		// Make the copied uniform data visible to uniform reads in the fragment stage.
		const VkBufferMemoryBarrier uboUpdateBarrier =
		{
			VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
			DE_NULL,									// const void*        pNext;
			VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      srcAccessMask;
			VK_ACCESS_UNIFORM_READ_BIT,					// VkAccessFlags      dstAccessMask;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
			VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
			uniformBuffer.get(),						// VkBuffer           buffer;
			0u,											// VkDeviceSize       offset;
			sizeof(drawColor)							// VkDeviceSize       size;
		};


		// Vertex data: host -> staging -> device-local, then release to vertex input.
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingVertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
		VkBufferCopy	vertexBufferCopy	= { 0u, 0u, verticesSize };
		vk.cmdCopyBuffer(*cmdBuffer, stagingVertexBuffer.get(), vertexBuffer.get(), 1u, &vertexBufferCopy);
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &vertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

		// Index data: same staging pattern.
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingIndexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
		VkBufferCopy	indexBufferCopy	= { 0u, 0u, indicesSize };
		vk.cmdCopyBuffer(*cmdBuffer, stagingIndexBuffer.get(), indexBuffer.get(), 1u, &indexBufferCopy);
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &indexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

		// Uniform data: staging pattern, released to fragment-shader uniform reads.
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingUboBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
		VkBufferCopy	uboBufferCopy	= { 0u, 0u, sizeof(drawColor) };
		vk.cmdCopyBuffer(*cmdBuffer, stagingUniformBuffer.get(), uniformBuffer.get(), 1u, &uboBufferCopy);
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &uboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

		if (m_drawTessellatedSphere)
		{
			// Tessellation-level SBO: staging pattern, released to the
			// tessellation-control stage where it is read.
			const VkBufferMemoryBarrier stagingsboUpdateBarrier =
			{
				VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
				DE_NULL,									// const void*        pNext;
				VK_ACCESS_HOST_WRITE_BIT,					// VkAccessFlags      srcAccessMask;
				VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags      dstAccessMask;
				VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
				VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
				stagingSboBuffer.get(),						// VkBuffer           buffer;
				0u,											// VkDeviceSize       offset;
				sizeof(tessLevel)							// VkDeviceSize       size;
			};

			const VkBufferMemoryBarrier sboUpdateBarrier =
			{
				VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType    sType;
				DE_NULL,									// const void*        pNext;
				VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      srcAccessMask;
				VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags      dstAccessMask;
				VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
				VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
				sboBuffer.get(),							// VkBuffer           buffer;
				0u,											// VkDeviceSize       offset;
				sizeof(tessLevel)							// VkDeviceSize       size;
			};

			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingsboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
			VkBufferCopy	sboBufferCopy	= { 0u, 0u, sizeof(tessLevel) };
			vk.cmdCopyBuffer(*cmdBuffer, stagingSboBuffer.get(), sboBuffer.get(), 1u, &sboBufferCopy);
			vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &sboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
		}

		// Bind all static draw state for the recorded renderpass below.
		vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
		vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
		{
			const VkDeviceSize bindingOffset = 0;
			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &bindingOffset);
			vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT32);
		}
	}
1336
	// Begin renderpass
	// In SFR mode a VkDeviceGroupRenderPassBeginInfo is chained in, giving each
	// device its own render area (left/right vertical halves).
	{
		const VkClearValue clearValue = makeClearValueColorF32(
			clearColor[0],
			clearColor[1],
			clearColor[2],
			clearColor[3]);

		VkRect2D zeroRect = { { 0, 0, },{ 0, 0, } };
		vector<VkRect2D> renderAreas;
		for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
			renderAreas.push_back(zeroRect);

		// Render completely if there is only 1 device
		if (m_physicalDeviceCount == 1u)
		{
			renderAreas[0].extent.width		= (deInt32)renderSize.x();
			renderAreas[0].extent.height	= (deInt32)renderSize.y();
		}
		else
		{
			// Split into 2 vertical halves
			renderAreas[firstDeviceID].extent.width		= (deInt32)renderSize.x() / 2;
			renderAreas[firstDeviceID].extent.height	= (deInt32)renderSize.y();
			renderAreas[secondDeviceID]					= renderAreas[firstDeviceID];
			renderAreas[secondDeviceID].offset.x		= (deInt32)renderSize.x() / 2;
		}

		const VkDeviceGroupRenderPassBeginInfo deviceGroupRPBeginInfo =
		{
			VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,
			DE_NULL,
			(deUint32)((1 << m_physicalDeviceCount) - 1),	// deviceMask: all devices in the group
			m_physicalDeviceCount,							// deviceRenderAreaCount
			&renderAreas[0]									// pDeviceRenderAreas
		};

		const VkRenderPassBeginInfo passBeginParams =
		{
			VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,							// sType
			(m_testMode & TEST_MODE_SFR) ? &deviceGroupRPBeginInfo : DE_NULL,	// pNext
			*renderPass,														// renderPass
			*framebuffer,														// framebuffer
			{
				{ 0, 0 },
				{ renderSize.x(), renderSize.y() }
			},																	// renderArea
			1u,																	// clearValueCount
			&clearValue,														// pClearValues
		};
		vk.cmdBeginRenderPass(*cmdBuffer, &passBeginParams, VK_SUBPASS_CONTENTS_INLINE);
	}

	// Draw
	if (m_testMode & TEST_MODE_AFR)
	{
		// AFR: only the second device renders this "frame".
		vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
		vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);

	}
	else
	{
		// SFR / default: both devices render (each clipped to its render area).
		vk.cmdSetDeviceMask(*cmdBuffer, ((1 << firstDeviceID) | (1 << secondDeviceID)));
		vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
	}
	endRenderPass(vk, *cmdBuffer);
1403
	// Change image layout for copy
	// Transition renderImage to TRANSFER_SRC_OPTIMAL so its contents can be
	// copied out (to the peer image and/or the readback buffer).
	{
		const VkImageMemoryBarrier	renderFinishBarrier =
		{
			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// sType
			DE_NULL,									// pNext
			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// srcAccessMask
			VK_ACCESS_TRANSFER_READ_BIT,				// dstAccessMask
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// oldLayout
			VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,		// newLayout
			queueFamilyIndex,							// srcQueueFamilyIndex
			queueFamilyIndex,							// dstQueueFamilyIndex
			*renderImage,								// image
			{
				VK_IMAGE_ASPECT_COLOR_BIT,				// aspectMask
				0u,										// baseMipLevel
				1u,										// levelCount
				0u,										// baseArrayLayer
				1u,										// layerCount
			}											// subresourceRange
		};
		vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &renderFinishBarrier);
	}

	endCommandBuffer(vk, *cmdBuffer);

	// Submit & wait for completion
	{
		// The whole recorded sequence (buffer uploads, draw, layout change)
		// executes on both participating devices.
		const deUint32 deviceMask = (1 << firstDeviceID) | (1 << secondDeviceID);
		SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
	}
1435
	// Copy image from secondDeviceID in case of AFR and SFR(only if Peer memory as copy source is not allowed)
	// Sequence (three separate submits, each targeting a single device via its
	// device mask): 1) on firstDeviceID, transition its instance of the aliased
	// memory to TRANSFER_DST; 2) on secondDeviceID, copy its rendered pixels
	// into peerImage (which is bound per-device-index, so the write lands in
	// firstDeviceID's memory instance); 3) on firstDeviceID, transition back to
	// TRANSFER_SRC for the readback below.
	if ((m_physicalDeviceCount > 1) && ((m_testMode & TEST_MODE_AFR) || (!isPeerMemAsCopySrcAllowed)))
	{
		Move<VkImage>			peerImage;

		// Create and bind peer image
		{
			// Same parameters as renderImage (required for aliasing), bound with
			// explicit per-device indices instead of SFR regions.
			const VkImageCreateInfo					peerImageParams =
			{
				VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// sType
				DE_NULL,								// pNext
				VK_IMAGE_CREATE_ALIAS_BIT,				// flags
				VK_IMAGE_TYPE_2D,						// imageType
				colorFormat,							// format
				{ renderSize.x(), renderSize.y(), 1 },	// extent
				1u,										// mipLevels
				1u,										// arraySize
				VK_SAMPLE_COUNT_1_BIT,					// samples
				VK_IMAGE_TILING_OPTIMAL,				// tiling
				imageUsageFlag,							// usage
				VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
				1u,										// queueFamilyIndexCount
				&queueFamilyIndex,						// pQueueFamilyIndices
				VK_IMAGE_LAYOUT_UNDEFINED,				// initialLayout
			};
			peerImage = createImage(vk, *m_deviceGroup, &peerImageParams);

			VkBindImageMemoryDeviceGroupInfo	devGroupBindInfo =
			{
				VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,	// sType
				DE_NULL,												// pNext
				m_physicalDeviceCount,									// deviceIndexCount
				&deviceIndices[0],										// pDeviceIndices
				0u,														// splitInstanceBindRegionCount
				DE_NULL,												// pSplitInstanceBindRegions
			};

			VkBindImageMemoryInfo				bindInfo =
			{
				VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,	// sType
				&devGroupBindInfo,							// pNext
				peerImage.get(),							// image
				imageMemory.get(),							// memory
				0u,											// memoryOffset
			};
			VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
		}

		// Copy peer image (only needed in SFR case when peer memory as copy source is not allowed)
		{
			// Change layout on firstDeviceID
			{
				const VkImageMemoryBarrier preCopyBarrier =
				{
					VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType    sType;
					DE_NULL,									// const void*        pNext;
					0,											// VkAccessFlags      srcAccessMask;
					VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      dstAccessMask;
					VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout      oldLayout;
					VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout      newLayout;
					VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
					VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
					*renderImage,								// VkImage            image;
					{											// VkImageSubresourceRange subresourceRange;
						VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags aspectMask;
						0u,										// deUint32           baseMipLevel;
						1u,										// deUint32           levelCount;
						0u,										// deUint32           baseArrayLayer;
						1u										// deUint32           layerCount;
					}
				};

				beginCommandBuffer(vk, *cmdBuffer);
				vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
				vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &preCopyBarrier);
				endCommandBuffer(vk, *cmdBuffer);

				const deUint32 deviceMask = 1 << firstDeviceID;
				SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
			}

			// Copy Image from secondDeviceID to firstDeviceID
			{
				// AFR: Copy entire image from secondDeviceID
				// SFR: Copy the right half of image from secondDeviceID to firstDeviceID, so that the copy
				// to a buffer below (for checking) does not require VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT
				deInt32		imageOffsetX	= (m_testMode & TEST_MODE_AFR) ? 0 : renderSize.x() / 2;
				deUint32	imageExtentX	= (m_testMode & TEST_MODE_AFR) ? (deUint32)renderSize.x() : (deUint32)renderSize.x() / 2;

				// Source and destination regions are identical (same offset/extent),
				// but the destination image's memory binding routes the write to
				// firstDeviceID's instance.
				const VkImageCopy	imageCopy	=
				{
					{
						VK_IMAGE_ASPECT_COLOR_BIT,	// aspectMask
						0,							// mipLevel
						0,							// arrayLayer
						1							// layerCount
					},
					{ imageOffsetX, 0, 0 },
					{
						VK_IMAGE_ASPECT_COLOR_BIT,	// aspectMask
						0,							// mipLevel
						0,							// arrayLayer
						1							// layerCount
					},
					{ imageOffsetX, 0, 0 },
					{
						imageExtentX,
						(deUint32)renderSize.y(),
						1u
					}
				};

				beginCommandBuffer(vk, *cmdBuffer);
				vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
				vk.cmdCopyImage(*cmdBuffer, *renderImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *peerImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageCopy);
				endCommandBuffer(vk, *cmdBuffer);

				const deUint32 deviceMask = 1 << secondDeviceID;
				SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
			}

			// Change layout back on firstDeviceID
			{
				const VkImageMemoryBarrier postCopyBarrier =
				{
					VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType    sType;
					DE_NULL,									// const void*        pNext;
					VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags      srcAccessMask;
					VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags      dstAccessMask;
					VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout      oldLayout;
					VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,		// VkImageLayout      newLayout;
					VK_QUEUE_FAMILY_IGNORED,					// deUint32           srcQueueFamilyIndex;
					VK_QUEUE_FAMILY_IGNORED,					// deUint32           dstQueueFamilyIndex;
					*renderImage,								// VkImage            image;
					{											// VkImageSubresourceRange subresourceRange;
						VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags aspectMask;
						0u,										// deUint32           baseMipLevel;
						1u,										// deUint32           levelCount;
						0u,										// deUint32           baseArrayLayer;
						1u										// deUint32           layerCount;
					}
				};

				beginCommandBuffer(vk, *cmdBuffer);
				vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
				vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &postCopyBarrier);
				endCommandBuffer(vk, *cmdBuffer);

				const deUint32 deviceMask = 1 << firstDeviceID;
				SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
			}
		}
	}
1589
1590 // copy image to read buffer for checking
1591 {
1592 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(sizeof(deUint32) * renderSize.x() * renderSize.y());
1593 const VkBufferCreateInfo readImageBufferParams =
1594 {
1595 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
1596 DE_NULL, // pNext
1597 (VkBufferCreateFlags)0u, // flags
1598 imageSizeBytes, // size
1599 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
1600 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1601 1u, // queueFamilyIndexCount
1602 &queueFamilyIndex, // pQueueFamilyIndices
1603 };
1604 const Unique<VkBuffer> readImageBuffer(createBuffer(vk, *m_deviceGroup, &readImageBufferParams));
1605 const UniquePtr<Allocation> readImageBufferMemory(memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *readImageBuffer), MemoryRequirement::HostVisible));
1606 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
1607
1608 beginCommandBuffer(vk, *cmdBuffer);
1609
1610 // Copy image to buffer
1611 {
1612 const VkBufferImageCopy copyParams =
1613 {
1614 (VkDeviceSize)0u, // bufferOffset
1615 renderSize.x(), // bufferRowLength
1616 renderSize.y(), // bufferImageHeight
1617 {
1618 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1619 0u, // mipLevel
1620 0u, // baseArrayLayer
1621 1u, // layerCount
1622 }, // imageSubresource
1623 { 0, 0, 0 }, // imageOffset
1624 {
1625 renderSize.x(),
1626 renderSize.y(),
1627 1u
1628 } // imageExtent
1629 };
1630
		// Use a different binding in SFR when peer memory as copy source is not allowed
1632 vk.cmdCopyImageToBuffer(*cmdBuffer, isPeerMemAsCopySrcAllowed ? *renderImage : *readImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, ©Params);
1633
1634 const VkBufferMemoryBarrier copyFinishBarrier =
1635 {
1636 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
1637 DE_NULL, // pNext
1638 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1639 VK_ACCESS_HOST_READ_BIT, // dstAccessMask
1640 queueFamilyIndex, // srcQueueFamilyIndex
1641 queueFamilyIndex, // dstQueueFamilyIndex
1642 *readImageBuffer, // buffer
1643 0u, // offset
1644 imageSizeBytes // size
1645 };
1646 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©FinishBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1647 }
1648 endCommandBuffer(vk, *cmdBuffer);
1649
1650 // Submit & wait for completion
1651 {
1652 const deUint32 deviceMask = 1 << firstDeviceID;
1653 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1654 }
1655
1656 // Read results and check against reference image
1657 if (m_drawTessellatedSphere)
1658 {
1659 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1660 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1661 invalidateAlloc(vk, *m_deviceGroup, *readImageBufferMemory);
1662
1663 tcu::TextureLevel referenceImage;
1664 string refImage = m_fillModeNonSolid ? "vulkan/data/device_group/sphere.png" : "vulkan/data/device_group/spherefilled.png";
1665 tcu::ImageIO::loadPNG(referenceImage, m_context.getTestContext().getArchive(), refImage.c_str());
1666 iterateResultSuccess = tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ImageComparison", "Image Comparison",
1667 referenceImage.getAccess(), resultAccess, 0.001f, tcu::COMPARE_LOG_RESULT);
1668 }
1669 else
1670 {
1671 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1672 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1673 invalidateAlloc(vk, *m_deviceGroup, *readImageBufferMemory);
1674
1675 // Render reference and compare
1676 {
1677 tcu::TextureLevel refImage(tcuFormat, (deInt32)renderSize.x(), (deInt32)renderSize.y());
1678 const tcu::UVec4 threshold(0u);
1679 const tcu::IVec3 posDeviation(1, 1, 0);
1680
1681 tcu::clear(refImage.getAccess(), clearColor);
1682 renderReferenceTriangle(refImage.getAccess(), triVertices, m_context.getDeviceProperties().limits.subPixelPrecisionBits);
1683
1684 iterateResultSuccess = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
1685 "ComparisonResult",
1686 "Image comparison result",
1687 refImage.getAccess(),
1688 resultAccess,
1689 threshold,
1690 posDeviation,
1691 false,
1692 tcu::COMPARE_LOG_RESULT);
1693 }
1694 }
1695 }
1696
1697 if (!iterateResultSuccess)
1698 return tcu::TestStatus::fail("Image comparison failed");
1699 }
1700
1701 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Device group verification passed");
1702 }
1703
1704 template<class Instance>
1705 class DeviceGroupTestCase : public TestCase
1706 {
1707 public:
DeviceGroupTestCase(tcu::TestContext & context,const char * name,const char * description,deUint32 mode)1708 DeviceGroupTestCase (tcu::TestContext& context,
1709 const char* name,
1710 const char* description,
1711 deUint32 mode)
1712 : TestCase(context, name, description)
1713 , m_testMode (mode)
1714 {}
1715
1716 private:
1717
1718 deUint32 m_testMode;
1719
createInstance(Context & context) const1720 TestInstance* createInstance (Context& context) const
1721 {
1722 return new Instance(context, m_testMode);
1723 }
1724
initPrograms(vk::SourceCollections & programCollection) const1725 void initPrograms (vk::SourceCollections& programCollection) const
1726 {
1727 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1728 "layout(location = 0) in vec4 in_Position;\n"
1729 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
1730 "void main() {\n"
1731 " gl_Position = in_Position;\n"
1732 " gl_PointSize = 1.0;\n"
1733 "}\n");
1734
1735 if (m_testMode & TEST_MODE_TESSELLATION)
1736 {
1737 programCollection.glslSources.add("tesc") << glu::TessellationControlSource("#version 450\n"
1738 "#extension GL_EXT_tessellation_shader : require\n"
1739 "layout(vertices=3) out;\n"
1740 "layout(set=0, binding=1) buffer tessLevel { \n"
1741 " float tessLvl;\n"
1742 "};\n"
1743 "void main()\n"
1744 "{\n"
1745 " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
1746 " if (gl_InvocationID == 0) {\n"
1747 " for (int i = 0; i < 4; i++)\n"
1748 " gl_TessLevelOuter[i] = tessLvl;\n"
1749 " for (int i = 0; i < 2; i++)\n"
1750 " gl_TessLevelInner[i] = tessLvl;\n"
1751 " }\n"
1752 "}\n");
1753
1754 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource("#version 450\n"
1755 "#extension GL_EXT_tessellation_shader : require\n"
1756 "layout(triangles) in;\n"
1757 "layout(equal_spacing) in;\n"
1758 "layout(ccw) in;\n"
1759 "void main()\n"
1760 "{\n"
1761 " vec4 pos = vec4(0, 0, 0, 0);\n"
1762 " vec3 tessCoord = gl_TessCoord.xyz;\n"
1763 " pos += tessCoord.z * gl_in[0].gl_Position;\n"
1764 " pos += tessCoord.x * gl_in[1].gl_Position;\n"
1765 " pos += tessCoord.y * gl_in[2].gl_Position;\n"
1766 " vec3 sign = sign(pos.xyz);\n"
1767 " pos.xyz = 0.785398 - abs(pos.xyz) * 1.5707963;\n"
1768 " pos.xyz = (1 - tan(pos.xyz))/2.0;\n"
1769 " pos.xyz = (sign * pos.xyz) / length(pos.xyz);\n"
1770 " gl_Position = pos;\n"
1771 "}\n");
1772 }
1773
1774 programCollection.glslSources.add("frag") << glu::FragmentSource("#version 430\n"
1775 "layout(location = 0) out vec4 out_FragColor;\n"
1776 "layout(std140, set=0, binding=0) uniform bufferData { \n"
1777 " vec4 color;\n"
1778 "};\n"
1779 "void main()\n"
1780 "{\n"
1781 " out_FragColor = color;\n"
1782 "}\n");
1783 }
1784 };
1785
1786 } //anonymous
1787
// Root test group ("device_group") holding all device-group rendering cases.
// Child cases are registered in init().
class DeviceGroupTestRendering : public tcu::TestCaseGroup
{
public:
	DeviceGroupTestRendering (tcu::TestContext& testCtx);
	~DeviceGroupTestRendering (void) {}
	// Populate the group with the SFR/AFR test case variants.
	void init(void);

private:
	// Not copyable: copy operations are declared but intentionally never
	// defined (pre-C++11 idiom for disabling copies).
	DeviceGroupTestRendering (const DeviceGroupTestRendering& other);
	DeviceGroupTestRendering& operator= (const DeviceGroupTestRendering& other);
};
1799
// Construct the "device_group" group; child cases are added later in init().
DeviceGroupTestRendering::DeviceGroupTestRendering (tcu::TestContext& testCtx)
	: TestCaseGroup	(testCtx, "device_group", "Testing device group test cases")
{
	// Left blank on purpose
}
1805
init(void)1806 void DeviceGroupTestRendering::init (void)
1807 {
1808 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr", "Test split frame rendering", TEST_MODE_SFR));
1809 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_SFR | TEST_MODE_HOSTMEMORY));
1810 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_SFR | TEST_MODE_DEDICATED));
1811 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_SFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1812
1813 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr", "Test alternate frame rendering", TEST_MODE_AFR));
1814 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_AFR | TEST_MODE_HOSTMEMORY));
1815 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_AFR | TEST_MODE_DEDICATED));
1816 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_AFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1817
1818 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated", "Test split frame rendering with tessellated sphere", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1819 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated_linefill", "Test split frame rendering with tessellated sphere with line segments", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1820 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated", "Test alternate frame rendering with tesselated sphere", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1821 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated_linefill", "Test alternate frame rendering with tesselated sphere with line segments", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1822 }
1823
// Framework entry point: build the device-group test hierarchy.
// Ownership of the returned group transfers to the caller.
tcu::TestCaseGroup* createTests(tcu::TestContext& testCtx)
{
	return new DeviceGroupTestRendering(testCtx);
}
1828 } // DeviceGroup
1829 } // vkt
1830