1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2019 The Khronos Group Inc.
6 * Copyright (c) 2019 Google Inc.
7 * Copyright (c) 2017 Codeplay Software Ltd.
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 */ /*!
22 * \file
23 * \brief Subgroups Tests
24 */ /*--------------------------------------------------------------------*/
25
26 #include "vktSubgroupsArithmeticTests.hpp"
27 #include "vktSubgroupsScanHelpers.hpp"
28 #include "vktSubgroupsTestsUtils.hpp"
29
30 #include <string>
31 #include <vector>
32
33 using namespace tcu;
34 using namespace std;
35 using namespace vk;
36 using namespace vkt;
37
38 namespace
39 {
// Arithmetic subgroup operation under test. The enumerators are laid out in
// three contiguous groups of seven: plain reductions first, then the
// inclusive-scan variants, then the exclusive-scan variants, each group
// listing the operators in the same order (add, mul, min, max, and, or, xor).
enum OpType
{
	// Reductions (subgroupAdd, subgroupMul, ...)
	OPTYPE_ADD = 0,
	OPTYPE_MUL,
	OPTYPE_MIN,
	OPTYPE_MAX,
	OPTYPE_AND,
	OPTYPE_OR,
	OPTYPE_XOR,
	// Inclusive scans (subgroupInclusiveAdd, ...)
	OPTYPE_INCLUSIVE_ADD,
	OPTYPE_INCLUSIVE_MUL,
	OPTYPE_INCLUSIVE_MIN,
	OPTYPE_INCLUSIVE_MAX,
	OPTYPE_INCLUSIVE_AND,
	OPTYPE_INCLUSIVE_OR,
	OPTYPE_INCLUSIVE_XOR,
	// Exclusive scans (subgroupExclusiveAdd, ...)
	OPTYPE_EXCLUSIVE_ADD,
	OPTYPE_EXCLUSIVE_MUL,
	OPTYPE_EXCLUSIVE_MIN,
	OPTYPE_EXCLUSIVE_MAX,
	OPTYPE_EXCLUSIVE_AND,
	OPTYPE_EXCLUSIVE_OR,
	OPTYPE_EXCLUSIVE_XOR,
	// Number of op types; used as the loop bound when generating cases.
	OPTYPE_LAST
};
65
getOperator(OpType t)66 static Operator getOperator(OpType t)
67 {
68 switch (t)
69 {
70 case OPTYPE_ADD:
71 case OPTYPE_INCLUSIVE_ADD:
72 case OPTYPE_EXCLUSIVE_ADD:
73 return OPERATOR_ADD;
74 case OPTYPE_MUL:
75 case OPTYPE_INCLUSIVE_MUL:
76 case OPTYPE_EXCLUSIVE_MUL:
77 return OPERATOR_MUL;
78 case OPTYPE_MIN:
79 case OPTYPE_INCLUSIVE_MIN:
80 case OPTYPE_EXCLUSIVE_MIN:
81 return OPERATOR_MIN;
82 case OPTYPE_MAX:
83 case OPTYPE_INCLUSIVE_MAX:
84 case OPTYPE_EXCLUSIVE_MAX:
85 return OPERATOR_MAX;
86 case OPTYPE_AND:
87 case OPTYPE_INCLUSIVE_AND:
88 case OPTYPE_EXCLUSIVE_AND:
89 return OPERATOR_AND;
90 case OPTYPE_OR:
91 case OPTYPE_INCLUSIVE_OR:
92 case OPTYPE_EXCLUSIVE_OR:
93 return OPERATOR_OR;
94 case OPTYPE_XOR:
95 case OPTYPE_INCLUSIVE_XOR:
96 case OPTYPE_EXCLUSIVE_XOR:
97 return OPERATOR_XOR;
98 default:
99 DE_FATAL("Unsupported op type");
100 return OPERATOR_ADD;
101 }
102 }
103
getScanType(OpType t)104 static ScanType getScanType(OpType t)
105 {
106 switch (t)
107 {
108 case OPTYPE_ADD:
109 case OPTYPE_MUL:
110 case OPTYPE_MIN:
111 case OPTYPE_MAX:
112 case OPTYPE_AND:
113 case OPTYPE_OR:
114 case OPTYPE_XOR:
115 return SCAN_REDUCE;
116 case OPTYPE_INCLUSIVE_ADD:
117 case OPTYPE_INCLUSIVE_MUL:
118 case OPTYPE_INCLUSIVE_MIN:
119 case OPTYPE_INCLUSIVE_MAX:
120 case OPTYPE_INCLUSIVE_AND:
121 case OPTYPE_INCLUSIVE_OR:
122 case OPTYPE_INCLUSIVE_XOR:
123 return SCAN_INCLUSIVE;
124 case OPTYPE_EXCLUSIVE_ADD:
125 case OPTYPE_EXCLUSIVE_MUL:
126 case OPTYPE_EXCLUSIVE_MIN:
127 case OPTYPE_EXCLUSIVE_MAX:
128 case OPTYPE_EXCLUSIVE_AND:
129 case OPTYPE_EXCLUSIVE_OR:
130 case OPTYPE_EXCLUSIVE_XOR:
131 return SCAN_EXCLUSIVE;
132 default:
133 DE_FATAL("Unsupported op type");
134 return SCAN_REDUCE;
135 }
136 }
137
checkVertexPipelineStages(const void * internalData,std::vector<const void * > datas,deUint32 width,deUint32)138 static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
139 deUint32 width, deUint32)
140 {
141 DE_UNREF(internalData);
142 return vkt::subgroups::check(datas, width, 0x3);
143 }
144
checkCompute(const void * internalData,std::vector<const void * > datas,const deUint32 numWorkgroups[3],const deUint32 localSize[3],deUint32)145 static bool checkCompute(const void* internalData, std::vector<const void*> datas,
146 const deUint32 numWorkgroups[3], const deUint32 localSize[3],
147 deUint32)
148 {
149 DE_UNREF(internalData);
150 return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0x3);
151 }
152
// Builds the GLSL builtin name for the given operator/scan combination by
// delegating to getScanOpName() with the "subgroup" prefix and no suffix.
std::string getOpTypeName(Operator op, ScanType scanType)
{
	return getScanOpName("subgroup", "", op, scanType);
}
157
// Parameters that fully describe a single generated test case.
struct CaseDefinition
{
	Operator op;						// basic operator under test (add/mul/min/max/and/or/xor)
	ScanType scanType;					// reduction, inclusive scan, or exclusive scan
	VkShaderStageFlags shaderStage;		// stage (or stage set) the test shader runs in
	VkFormat format;					// element format of the input data
	de::SharedPtr<bool> geometryPointSizeSupported;	// written by supportedCheck(), read by the program generators
	deBool requiredSubgroupSize;		// when true, exercise VK_EXT_subgroup_size_control required sizes
};
167
getExtHeader(CaseDefinition caseDef)168 std::string getExtHeader(CaseDefinition caseDef)
169 {
170 return "#extension GL_KHR_shader_subgroup_arithmetic: enable\n"
171 "#extension GL_KHR_shader_subgroup_ballot: enable\n" +
172 subgroups::getAdditionalExtensionForFormat(caseDef.format);
173 }
174
getIndexVars(CaseDefinition caseDef)175 std::string getIndexVars(CaseDefinition caseDef)
176 {
177 switch (caseDef.scanType)
178 {
179 case SCAN_REDUCE:
180 return " uint start = 0, end = gl_SubgroupSize;\n";
181 case SCAN_INCLUSIVE:
182 return " uint start = 0, end = gl_SubgroupInvocationID + 1;\n";
183 case SCAN_EXCLUSIVE:
184 return " uint start = 0, end = gl_SubgroupInvocationID;\n";
185 }
186 DE_FATAL("Unreachable");
187 return "";
188 }
189
getTestSrc(CaseDefinition caseDef)190 std::string getTestSrc(CaseDefinition caseDef)
191 {
192 std::string indexVars = getIndexVars(caseDef);
193
194 return " uvec4 mask = subgroupBallot(true);\n"
195 + indexVars +
196 " " + subgroups::getFormatNameForGLSL(caseDef.format) + " ref = "
197 + getIdentity(caseDef.op, caseDef.format) + ";\n"
198 " tempRes = 0;\n"
199 " for (uint index = start; index < end; index++)\n"
200 " {\n"
201 " if (subgroupBallotBitExtract(mask, index))\n"
202 " {\n"
203 " ref = " + getOpOperation(caseDef.op, caseDef.format, "ref", "data[index]") + ";\n"
204 " }\n"
205 " }\n"
206 " tempRes = " + getCompare(caseDef.op, caseDef.format, "ref", getOpTypeName(caseDef.op, caseDef.scanType) + "(data[gl_SubgroupInvocationID])") + " ? 0x1 : 0;\n"
207 " if (1 == (gl_SubgroupInvocationID % 2))\n"
208 " {\n"
209 " mask = subgroupBallot(true);\n"
210 " ref = " + getIdentity(caseDef.op, caseDef.format) + ";\n"
211 " for (uint index = start; index < end; index++)\n"
212 " {\n"
213 " if (subgroupBallotBitExtract(mask, index))\n"
214 " {\n"
215 " ref = " + getOpOperation(caseDef.op, caseDef.format, "ref", "data[index]") + ";\n"
216 " }\n"
217 " }\n"
218 " tempRes |= " + getCompare(caseDef.op, caseDef.format, "ref", getOpTypeName(caseDef.op, caseDef.scanType) + "(data[gl_SubgroupInvocationID])") + " ? 0x2 : 0;\n"
219 " }\n"
220 " else\n"
221 " {\n"
222 " tempRes |= 0x2;\n"
223 " }\n";
224 }
225
initFrameBufferPrograms(SourceCollections & programCollection,CaseDefinition caseDef)226 void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
227 {
228 const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
229
230 std::string extHeader = getExtHeader(caseDef);
231 std::string testSrc = getTestSrc(caseDef);
232
233 subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
234 }
235
initPrograms(SourceCollections & programCollection,CaseDefinition caseDef)236 void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
237 {
238 const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
239
240 std::string extHeader = getExtHeader(caseDef);
241 std::string testSrc = getTestSrc(caseDef);
242
243 subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
244 }
245
supportedCheck(Context & context,CaseDefinition caseDef)246 void supportedCheck (Context& context, CaseDefinition caseDef)
247 {
248 if (!subgroups::isSubgroupSupported(context))
249 TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
250
251 if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_ARITHMETIC_BIT))
252 TCU_THROW(NotSupportedError, "Device does not support subgroup arithmetic operations");
253
254 if (!subgroups::isFormatSupportedForDevice(context, caseDef.format))
255 TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
256
257 if (caseDef.requiredSubgroupSize)
258 {
259 if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
260 TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
261 VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
262 subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
263 subgroupSizeControlFeatures.pNext = DE_NULL;
264
265 VkPhysicalDeviceFeatures2 features;
266 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
267 features.pNext = &subgroupSizeControlFeatures;
268
269 context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
270
271 if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
272 TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
273
274 if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
275 TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
276
277 VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
278 subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
279 subgroupSizeControlProperties.pNext = DE_NULL;
280
281 VkPhysicalDeviceProperties2 properties;
282 properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
283 properties.pNext = &subgroupSizeControlProperties;
284
285 context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
286
287 if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
288 TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
289 }
290
291 *caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
292 }
293
noSSBOtest(Context & context,const CaseDefinition caseDef)294 tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
295 {
296 if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
297 {
298 if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
299 {
300 return tcu::TestStatus::fail(
301 "Shader stage " +
302 subgroups::getShaderStageName(caseDef.shaderStage) +
303 " is required to support subgroup operations!");
304 }
305 else
306 {
307 TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
308 }
309 }
310
311 subgroups::SSBOData inputData;
312 inputData.format = caseDef.format;
313 inputData.layout = subgroups::SSBOData::LayoutStd140;
314 inputData.numElements = subgroups::maxSupportedSubgroupSize();
315 inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
316
317 if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
318 return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
319 else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
320 return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
321 else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
322 return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
323 else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
324 return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
325 else
326 TCU_THROW(InternalError, "Unhandled shader stage");
327 }
328
329
// Runs the SSBO-based variant of the test.
//
// Compute path: runs makeComputeTest once, or — when requiredSubgroupSize is
// set — once per power-of-two size in the device's supported subgroup size
// range with VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT.
//
// Graphics path: intersects the requested stages with the device's supported
// subgroup stages and runs subgroups::allStages over the result.
tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
{
	if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
	{
		if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
		{
			// Compute is always required to support subgroup operations.
			return tcu::TestStatus::fail(
				"Shader stage " +
				subgroups::getShaderStageName(caseDef.shaderStage) +
				" is required to support subgroup operations!");
		}
		// Input buffer: one non-zero element per possible invocation, std430.
		subgroups::SSBOData inputData;
		inputData.format = caseDef.format;
		inputData.layout = subgroups::SSBOData::LayoutStd430;
		inputData.numElements = subgroups::maxSupportedSubgroupSize();
		inputData.initializeType = subgroups::SSBOData::InitializeNonZero;

		if (caseDef.requiredSubgroupSize == DE_FALSE)
			return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);

		// Required-subgroup-size flavour: query the supported size range via
		// the pNext chain of VkPhysicalDeviceProperties2.
		tcu::TestLog& log = context.getTestContext().getLog();
		VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
		subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
		subgroupSizeControlProperties.pNext = DE_NULL;
		VkPhysicalDeviceProperties2 properties;
		properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
		properties.pNext = &subgroupSizeControlProperties;

		context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);

		log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
			<< subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;

		// According to the spec, requiredSubgroupSize must be a power-of-two integer.
		for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
		{
			tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
																size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
			if (result.getCode() != QP_TEST_RESULT_PASS)
			{
				// Stop at the first failing size and report it.
				log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
				return result;
			}
		}

		return tcu::TestStatus::pass("OK");
	}
	else
	{
		// Graphics path: determine which of the requested stages actually
		// support subgroup operations on this device.
		VkPhysicalDeviceSubgroupProperties subgroupProperties;
		subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
		subgroupProperties.pNext = DE_NULL;

		VkPhysicalDeviceProperties2 properties;
		properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
		properties.pNext = &subgroupProperties;

		context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);

		VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);

		// Without vertex-stage SSBO writes, the test can only run in the
		// fragment stage (which writes results differently).
		if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
		{
			if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
				TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
			else
				stages = VK_SHADER_STAGE_FRAGMENT_BIT;
		}

		if ((VkShaderStageFlagBits)0u == stages)
			TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");

		// Input buffer: std430, bound at binding 4, visible to all tested stages.
		subgroups::SSBOData inputData;
		inputData.format = caseDef.format;
		inputData.layout = subgroups::SSBOData::LayoutStd430;
		inputData.numElements = subgroups::maxSupportedSubgroupSize();
		inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
		inputData.binding = 4u;
		inputData.stages = stages;

		return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData,
									1, DE_NULL, checkVertexPipelineStages, stages);
	}
}
414 }
415
416 namespace vkt
417 {
418 namespace subgroups
419 {
createSubgroupsArithmeticTests(tcu::TestContext & testCtx)420 tcu::TestCaseGroup* createSubgroupsArithmeticTests(tcu::TestContext& testCtx)
421 {
422 de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
423 testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
424 de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
425 testCtx, "compute", "Subgroup arithmetic category tests: compute"));
426 de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
427 testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
428
429 const VkShaderStageFlags stages[] =
430 {
431 VK_SHADER_STAGE_VERTEX_BIT,
432 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
433 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
434 VK_SHADER_STAGE_GEOMETRY_BIT,
435 };
436
437 const std::vector<VkFormat> formats = subgroups::getAllFormats();
438
439 for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
440 {
441 const VkFormat format = formats[formatIndex];
442
443 for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
444 {
445 bool isBool = subgroups::isFormatBool(format);
446 bool isFloat = subgroups::isFormatFloat(format);
447
448 OpType opType = static_cast<OpType>(opTypeIndex);
449 Operator op = getOperator(opType);
450 ScanType st = getScanType(opType);
451
452 bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
453
454 // Skip float with bitwise category.
455 if (isFloat && isBitwiseOp)
456 continue;
457
458 // Skip bool when its not the bitwise category.
459 if (isBool && !isBitwiseOp)
460 continue;
461
462 const std::string name = de::toLower(getOpTypeName(op, st)) + "_" + subgroups::getFormatNameForGLSL(format);
463
464 {
465 CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
466 addFunctionCaseWithPrograms(computeGroup.get(), name,
467 "", supportedCheck, initPrograms, test, caseDef);
468 caseDef.requiredSubgroupSize = DE_TRUE;
469 addFunctionCaseWithPrograms(computeGroup.get(), name + "_requiredsubgroupsize",
470 "", supportedCheck, initPrograms, test, caseDef);
471 }
472
473 {
474 const CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE};
475 addFunctionCaseWithPrograms(graphicGroup.get(), name,
476 "", supportedCheck, initPrograms, test, caseDef);
477 }
478
479 for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
480 {
481 const CaseDefinition caseDef = {op, st, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
482 addFunctionCaseWithPrograms(framebufferGroup.get(), name +
483 "_" + getShaderStageName(caseDef.shaderStage), "",
484 supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
485 }
486 }
487 }
488
489 de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
490 testCtx, "arithmetic", "Subgroup arithmetic category tests"));
491
492 group->addChild(graphicGroup.release());
493 group->addChild(computeGroup.release());
494 group->addChild(framebufferGroup.release());
495
496 return group.release();
497 }
498 } // subgroups
499 } // vkt
500