summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaciej Jesionowski <maciej.jesionowski@mobica.com>2016-04-07 09:57:13 +0200
committerMaciej Jesionowski <maciej.jesionowski@mobica.com>2016-04-18 09:49:46 +0200
commit66980a8d7f3440111c785bc782bfd9411364269a (patch)
treef669dfa3edbe1de31806723a7d4a845d815aa555
parent7ce210c1fddad1577e487357f6c5954991559f93 (diff)
downloadVK-GL-CTS-66980a8d7f3440111c785bc782bfd9411364269a.tar.gz
VK-GL-CTS-66980a8d7f3440111c785bc782bfd9411364269a.tar.bz2
VK-GL-CTS-66980a8d7f3440111c785bc782bfd9411364269a.zip
Add specialization constants tests
Adds the spec. constant tests for graphics and compute pipelines. Includes: - GLSL-based shaders - Basic, vector, matrix, struct, array constants - Work group size specialization - Built-in constant specialization - Spec. const as array size Excludes: - Spec. constant expressions (OpSpecConstantOp), as they are tested extensively in assembly tests.
-rw-r--r--doc/testspecs/VK/pipeline.spec_constant.txt81
-rw-r--r--external/vulkancts/modules/vulkan/pipeline/CMakeLists.txt4
-rw-r--r--external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.cpp1870
-rw-r--r--external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.hpp39
-rw-r--r--external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.cpp639
-rw-r--r--external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.hpp194
-rw-r--r--external/vulkancts/modules/vulkan/pipeline/vktPipelineTests.cpp2
-rw-r--r--external/vulkancts/mustpass/1.0.0/vk-default.txt1033
-rw-r--r--framework/opengl/gluShaderUtil.cpp237
-rw-r--r--framework/opengl/gluShaderUtil.hpp26
10 files changed, 4096 insertions, 29 deletions
diff --git a/doc/testspecs/VK/pipeline.spec_constant.txt b/doc/testspecs/VK/pipeline.spec_constant.txt
new file mode 100644
index 000000000..842b91fd2
--- /dev/null
+++ b/doc/testspecs/VK/pipeline.spec_constant.txt
@@ -0,0 +1,81 @@
+ Specialization Constants tests
+
+Tests:
+ + dEQP-VK.pipeline.spec_constant.*
+
+Includes:
+ + Uses extended GLSL (KHR_vulkan_glsl) to specify shaders
+ (dependency on glslang)
+ + Basic specialization constants (OpSpecConstant, OpSpecConstantTrue,
+ OpSpecConstantFalse instructions)
+ - No specialization info (default values)
+ - Partial and full specialization
+ - 32 bit boolean, integer and float types
+ - (optional) 16 and 64 bit types, where supported
+ + Specialization constants in composites (OpSpecConstantComposite instruction)
+ - struct members
+ - array elements
+ - vector components
+ - matrix columns
+ + Specialization constants in expressions (OpSpecConstantOp instruction)
+ - Array size
+ - (optional) Various instructions as listed in the spec
+ + Compute shader work group size specialization
+ + Built-in constant specialization (override the default value)
+ + All of the above should be exercised with all applicable shader stages in
+ both pipeline types (compute and graphics).
+
+Excludes:
+ + SPIR-V assembly code
+ + OpSpecConstantOp instructions are covered by assembly tests in
+ external/vulkancts/modules/vulkan/spirv_assembly/vktSpvAsmInstructionTests.cpp
+
+Notes:
+ + SPIR-V generated from GLSL should be inspected for instruction coverage
+ and overall correctness before accepting the tests.
+
+Description:
+
+The tests will run various shader stages with some specialization constants.
+Constant values are read in the shader, written to a SSBO, and read back from
+a host-visible buffer. Depending on the test, none, some, or all specialization
+constants are defined through VkSpecializationInfo. The test passes if the value
+written by the shader matches the expected reference value (either the default
+or a set specialized value).
+
+For expression tests, the result of an operation is written to the output SSBO
+and then compared to a reference operation result.
+
+A test with no specialization info verifies that the default values defined in
+the shader are used correctly. Specialization with one or more specialization
+constants checks that the Vulkan structure is consumed correctly by the API.
+
+Different types and type widths are tested to see if the provided value is
+passed correctly to the shader. The tests will use types and expressions
+similar to the following (examples are not exhaustive):
+
+ // Basic specialization constants and const expressions
+
+ layout(constant_id = 7) const int foo = 3;
+ layout(constant_id = 9) const float bar = 6.5;
+
+ int data[foo];
+ int data2[foo + 2];
+
+ // Specialization constant expressions
+
+ const float expr_fadd = bar + 3.5; // OpFAdd
+
+ // Specialization constant composites
+
+ const vec3 sc_vec = vec3(1.0, bar, 3.0);
+ const int sc_array[4] = int[](foo, 2, 3, 4);
+
+ // Override work group size
+
+ layout(local_size_y_id = 19) in; // y=? (id=19)
+ layout(local_size_x_id = 20, local_size_z_id = 21) in; // x=? z=?
+
+ // Override Built-in constant
+
+ layout(constant_id = 13) gl_MaxImageUnits;
diff --git a/external/vulkancts/modules/vulkan/pipeline/CMakeLists.txt b/external/vulkancts/modules/vulkan/pipeline/CMakeLists.txt
index 328210f91..b7fd16f43 100644
--- a/external/vulkancts/modules/vulkan/pipeline/CMakeLists.txt
+++ b/external/vulkancts/modules/vulkan/pipeline/CMakeLists.txt
@@ -17,6 +17,10 @@ set(DEQP_VK_PIPELINE_SRCS
vktPipelineImageTests.hpp
vktPipelinePushConstantTests.cpp
vktPipelinePushConstantTests.hpp
+ vktPipelineSpecConstantTests.hpp
+ vktPipelineSpecConstantTests.cpp
+ vktPipelineSpecConstantUtil.hpp
+ vktPipelineSpecConstantUtil.cpp
vktPipelineImageUtil.cpp
vktPipelineImageUtil.hpp
vktPipelineImageViewTests.cpp
diff --git a/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.cpp b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.cpp
new file mode 100644
index 000000000..d36f9b799
--- /dev/null
+++ b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.cpp
@@ -0,0 +1,1870 @@
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Pipeline specialization constants tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineSpecConstantTests.hpp"
+#include "vktTestCase.hpp"
+#include "vktPipelineSpecConstantUtil.hpp"
+
+#include "tcuTestLog.hpp"
+#include "tcuTexture.hpp"
+#include "tcuFormatUtil.hpp"
+
+#include "gluShaderUtil.hpp"
+
+#include "vkBuilderUtil.hpp"
+#include "vkPrograms.hpp"
+#include "vkRefUtil.hpp"
+#include "vkTypeUtil.hpp"
+#include "vkImageUtil.hpp"
+
+#include "deUniquePtr.hpp"
+#include "deStringUtil.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+using namespace vk;
+
+namespace
+{
+
+//! Raw memory storage for values used in test cases.
+//! We use it to simplify test case definitions where different types are expected in the result.
+class GenericValue
+{
+public:
+ GenericValue (void) { clear(); }
+
+ //! Copy up to 'size' bytes of 'data'.
+ GenericValue (const void* data, const deUint32 size)
+ {
+ DE_ASSERT(size <= sizeof(m_data));
+ clear();
+ deMemcpy(&m_data, data, size);
+ }
+
+private:
+ deUint64 m_data;
+
+ void clear (void) { m_data = 0; }
+};
+
// Factories wrapping a typed host value in GenericValue raw storage.
// GenericValue zero-fills its 8-byte buffer before copying, so smaller types leave the
// remaining bytes zero. NOTE(review): makeValueBool32 copies sizeof(bool) == 1 byte,
// while results are compared as 4-byte booleans (see OffsetValue usages below); this
// appears to rely on the zero padding and little-endian byte order -- TODO confirm.
inline GenericValue makeValueBool32  (const bool a)     { return GenericValue(&a, sizeof(a)); }
inline GenericValue makeValueInt32   (const deInt32 a)  { return GenericValue(&a, sizeof(a)); }
inline GenericValue makeValueInt64   (const deInt64 a)  { return GenericValue(&a, sizeof(a)); }
inline GenericValue makeValueUint32  (const deUint32 a) { return GenericValue(&a, sizeof(a)); }
inline GenericValue makeValueUint64  (const deUint64 a) { return GenericValue(&a, sizeof(a)); }
inline GenericValue makeValueFloat32 (const float a)    { return GenericValue(&a, sizeof(a)); }
inline GenericValue makeValueFloat64 (const double a)   { return GenericValue(&a, sizeof(a)); }
+
//! Describes one specialization constant declared in the test shader.
struct SpecConstant
{
    deUint32    specID;          //!< specialization constant ID
    std::string declarationCode; //!< syntax to declare the constant, use ${ID} as an ID placeholder
    deUint32    size;            //!< data size on the host, 0 = no specialized value
    GenericValue specValue;      //!< specialized value passed by the API

    //! Declare a constant that is NOT specialized via the API (shader default value applies).
    SpecConstant (const deUint32 specID_, const std::string declarationCode_)
        : specID          (specID_)
        , declarationCode (declarationCode_)
        , size            (0)
        , specValue       ()
    {
    }

    //! Declare a constant specialized through VkSpecializationInfo with 'specValue_' of 'size_' bytes.
    SpecConstant (const deUint32 specID_, const std::string declarationCode_, const deUint32 size_, const GenericValue specValue_)
        : specID          (specID_)
        , declarationCode (declarationCode_)
        , size            (size_)
        , specValue       (specValue_)
    {
    }
};
+
//! Useful when referring to a value in a buffer (i.e. check expected values in SSBO).
struct OffsetValue
{
    deUint32    size;   //!< data size in the buffer (up to sizeof(value))
    deUint32    offset; //!< offset into the buffer
    GenericValue value; //!< value expected to be there

    OffsetValue (const deUint32 size_, const deUint32 offset_, const GenericValue value_)
        : size   (size_)
        , offset (offset_)
        , value  (value_)
    {}
};
+
+//! Get the integer value of 'size' bytes at 'memory' location.
+deUint64 memoryAsInteger (const void* memory, const deUint32 size)
+{
+ DE_ASSERT(size <= sizeof(deUint64));
+ deUint64 value = 0;
+ deMemcpy(&value, memory, size);
+ return value;
+}
+
+inline std::string memoryAsHexString (const void* memory, const deUint32 size)
+{
+ const deUint8* memoryBytePtr = static_cast<const deUint8*>(memory);
+ return de::toString(tcu::formatArray(tcu::Format::HexIterator<deUint8>(memoryBytePtr), tcu::Format::HexIterator<deUint8>(memoryBytePtr + size)));
+}
+
+void logValueMismatch (tcu::TestLog& log, const void* expected, const void* actual, const deUint32 offset, const deUint32 size)
+{
+ const bool canDisplayValue = (size <= sizeof(deUint64));
+ log << tcu::TestLog::Message
+ << "Comparison failed for value at offset " << de::toString(offset) << ": expected "
+ << (canDisplayValue ? de::toString(memoryAsInteger(expected, size)) + " " : "") << memoryAsHexString(expected, size) << " but got "
+ << (canDisplayValue ? de::toString(memoryAsInteger(actual, size)) + " " : "") << memoryAsHexString(actual, size)
+ << tcu::TestLog::EndMessage;
+}
+
+//! Check if expected values exist in the memory.
+bool verifyValues (tcu::TestLog& log, const void* memory, const std::vector<OffsetValue>& expectedValues)
+{
+ bool ok = true;
+ log << tcu::TestLog::Section("compare", "Verify result values");
+
+ for (std::vector<OffsetValue>::const_iterator it = expectedValues.begin(); it < expectedValues.end(); ++it)
+ {
+ const char* const valuePtr = static_cast<const char*>(memory) + it->offset;
+ if (deMemCmp(valuePtr, &it->value, it->size) != 0)
+ {
+ ok = false;
+ logValueMismatch(log, &it->value, valuePtr, it->offset, it->size);
+ }
+ }
+
+ if (ok)
+ log << tcu::TestLog::Message << "All OK" << tcu::TestLog::EndMessage;
+
+ log << tcu::TestLog::EndSection;
+ return ok;
+}
+
//! Bundles together common test case parameters.
//! Instances are typically defined as static aggregate-initialized tables in the
//! create*Tests() factory functions below.
struct CaseDefinition
{
    std::string               name;           //!< Test case name
    std::vector<SpecConstant> specConstants;  //!< list of specialization constants to declare
    VkDeviceSize              ssboSize;       //!< required ssbo size in bytes
    std::string               ssboCode;       //!< ssbo member definitions
    std::string               globalCode;     //!< generic shader code outside the main function (e.g. declarations)
    std::string               mainCode;       //!< generic shader code to execute in main (e.g. assignments)
    std::vector<OffsetValue>  expectedValues; //!< list of values to check inside the ssbo buffer
    FeatureFlags              requirements;   //!< features the implementation must support to allow this test to run
};
+
//! Manages Vulkan structures to pass specialization data.
//! Owns the backing storage for VkSpecializationInfo: pMapEntries and pData point into
//! the member vectors, so the returned info is valid only for the lifetime of this object.
class Specialization
{
public:
    Specialization (const std::vector<SpecConstant>& specConstants);

    //! Can return NULL if nothing is specialized
    const VkSpecializationInfo* getSpecializationInfo (void) const { return m_entries.size() > 0 ? &m_specialization : DE_NULL; }

private:
    std::vector<GenericValue>            m_data;           //!< raw specialization data, one 8-byte slot per constant
    std::vector<VkSpecializationMapEntry> m_entries;       //!< constantID -> (offset, size) map entries
    VkSpecializationInfo                 m_specialization; //!< points into m_data/m_entries
};

Specialization::Specialization (const std::vector<SpecConstant>& specConstants)
{
    m_data.reserve(specConstants.size());
    m_entries.reserve(specConstants.size());

    // Pack only constants that actually carry an API-provided value (size != 0).
    // Each value occupies a full GenericValue slot regardless of its declared size.
    deUint32 offset = 0;
    for (std::vector<SpecConstant>::const_iterator it = specConstants.begin(); it != specConstants.end(); ++it)
        if (it->size != 0)
        {
            m_data.push_back(it->specValue);
            m_entries.push_back(makeSpecializationMapEntry(it->specID, offset, it->size));
            offset += sizeof(GenericValue);
        }

    // Pointers are taken only after all push_back calls, so vector reallocation
    // cannot invalidate them afterwards (the vectors are never modified again).
    if (m_entries.size() > 0)
    {
        m_specialization.mapEntryCount = static_cast<deUint32>(m_entries.size());
        m_specialization.pMapEntries   = &m_entries[0];
        m_specialization.dataSize      = sizeof(GenericValue) * m_data.size();
        m_specialization.pData         = &m_data[0];
    }
    else
        deMemset(&m_specialization, 0, sizeof(m_specialization));
}
+
//! Test case that generates per-stage shaders for a CaseDefinition and dispatches to
//! a compute or graphics instance depending on the tested shader stage.
class SpecConstantTest : public TestCase
{
public:
    SpecConstantTest (tcu::TestContext&          testCtx,
                      const VkShaderStageFlagBits stage,   //!< which shader stage is tested
                      const CaseDefinition&       caseDef);

    void          initPrograms   (SourceCollections& programCollection) const;
    TestInstance* createInstance (Context& context) const;

private:
    const VkShaderStageFlagBits m_stage;   // single stage under test
    const CaseDefinition        m_caseDef; // copied so the test owns its data
};

SpecConstantTest::SpecConstantTest (tcu::TestContext&           testCtx,
                                    const VkShaderStageFlagBits stage,
                                    const CaseDefinition&       caseDef)
    : TestCase  (testCtx, caseDef.name, "")
    , m_stage   (stage)
    , m_caseDef (caseDef)
{
}
+
+//! Build a string that declares all specialization constants, replacing ${ID} with proper ID numbers.
+std::string generateSpecConstantCode (const std::vector<SpecConstant>& specConstants)
+{
+ std::ostringstream code;
+ for (std::vector<SpecConstant>::const_iterator it = specConstants.begin(); it != specConstants.end(); ++it)
+ {
+ std::string decl = it->declarationCode;
+ const std::string::size_type pos = decl.find("${ID}");
+ if (pos != std::string::npos)
+ decl.replace(pos, 5, de::toString(it->specID));
+ code << decl << "\n";
+ }
+ code << "\n";
+ return code.str();
+}
+
//! Wrap the given member declarations in the write-only output SSBO interface block
//! (set 0, binding 0) that all test shaders use to report results.
std::string generateSSBOCode (const std::string& memberDeclarations)
{
    std::string code;
    code += "layout (set = 0, binding = 0, std430) writeonly buffer Output {\n";
    code += memberDeclarations;
    code += "} sb_out;\n";
    code += "\n";
    return code;
}
+
//! Generate GLSL sources for every stage required by the tested stage.
//! Only the stage under test receives the spec constant declarations, the SSBO
//! interface and the case's global/main code; the other stages are minimal
//! pass-through shaders needed to form a complete pipeline.
void SpecConstantTest::initPrograms (SourceCollections& programCollection) const
{
    // Always add vertex and fragment to graphics stages
    VkShaderStageFlags requiredStages = m_stage;

    if (requiredStages & VK_SHADER_STAGE_ALL_GRAPHICS)
        requiredStages |= VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;

    // Tessellation stages always come as a control+evaluation pair.
    if (requiredStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
        requiredStages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;

    // Either graphics or compute must be defined, but not both
    DE_ASSERT(((requiredStages & VK_SHADER_STAGE_ALL_GRAPHICS) != 0) != ((requiredStages & VK_SHADER_STAGE_COMPUTE_BIT) != 0));

    if (requiredStages & VK_SHADER_STAGE_VERTEX_BIT)
    {
        // Pass-through vertex shader; carries the test payload only when vertex is the tested stage.
        const bool useSpecConst = (m_stage == VK_SHADER_STAGE_VERTEX_BIT);
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
            << "layout(location = 0) in highp vec4 position;\n"
            << "\n"
            << (useSpecConst ? generateSpecConstantCode(m_caseDef.specConstants) : "")
            << (useSpecConst ? generateSSBOCode(m_caseDef.ssboCode) : "")
            << (useSpecConst ? m_caseDef.globalCode + "\n" : "")
            << "void main (void)\n"
            << "{\n"
            << (useSpecConst ? m_caseDef.mainCode + "\n" : "")
            << "    gl_Position = position;\n"
            << "}\n";

        programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
    }

    if (requiredStages & VK_SHADER_STAGE_FRAGMENT_BIT)
    {
        // Constant-color fragment shader; payload only when fragment is the tested stage.
        const bool useSpecConst = (m_stage == VK_SHADER_STAGE_FRAGMENT_BIT);
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
            << "layout(location = 0) out highp vec4 fragColor;\n"
            << "\n"
            << (useSpecConst ? generateSpecConstantCode(m_caseDef.specConstants) : "")
            << (useSpecConst ? generateSSBOCode(m_caseDef.ssboCode) : "")
            << (useSpecConst ? m_caseDef.globalCode + "\n" : "")
            << "void main (void)\n"
            << "{\n"
            << (useSpecConst ? m_caseDef.mainCode + "\n" : "")
            << "    fragColor = vec4(1.0, 1.0, 0.0, 1.0);\n"
            << "}\n";

        programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
    }

    if (requiredStages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
    {
        // Fixed tessellation levels; payload only when TCS is the tested stage.
        const bool useSpecConst = (m_stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
            << "layout(vertices = 3) out;\n"
            << "\n"
            << (useSpecConst ? generateSpecConstantCode(m_caseDef.specConstants) : "")
            << (useSpecConst ? generateSSBOCode(m_caseDef.ssboCode) : "")
            << (useSpecConst ? m_caseDef.globalCode + "\n" : "")
            << "void main (void)\n"
            << "{\n"
            << (useSpecConst ? m_caseDef.mainCode + "\n" : "")
            << "    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
            << "    if (gl_InvocationID == 0)\n"
            << "    {\n"
            << "        gl_TessLevelInner[0] = 3;\n"
            << "        gl_TessLevelOuter[0] = 2;\n"
            << "        gl_TessLevelOuter[1] = 2;\n"
            << "        gl_TessLevelOuter[2] = 2;\n"
            << "    }\n"
            << "}\n";

        programCollection.glslSources.add("tesc") << glu::TessellationControlSource(src.str());
    }

    if (requiredStages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
    {
        // Barycentric interpolation of the patch; payload only when TES is the tested stage.
        const bool useSpecConst = (m_stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
            << "layout(triangles, equal_spacing, ccw) in;\n"
            << "\n"
            << (useSpecConst ? generateSpecConstantCode(m_caseDef.specConstants) : "")
            << (useSpecConst ? generateSSBOCode(m_caseDef.ssboCode) : "")
            << (useSpecConst ? m_caseDef.globalCode + "\n" : "")
            << "void main (void)\n"
            << "{\n"
            << (useSpecConst ? m_caseDef.mainCode + "\n" : "")
            << "    vec3 p0 = gl_TessCoord.x * gl_in[0].gl_Position.xyz;\n"
            << "    vec3 p1 = gl_TessCoord.y * gl_in[1].gl_Position.xyz;\n"
            << "    vec3 p2 = gl_TessCoord.z * gl_in[2].gl_Position.xyz;\n"
            << "    gl_Position = vec4(p0 + p1 + p2, 1.0);\n"
            << "}\n";

        programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(src.str());
    }

    if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
    {
        // Pass-through triangle geometry shader; payload only when GS is the tested stage.
        const bool useSpecConst = (m_stage == VK_SHADER_STAGE_GEOMETRY_BIT);
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
            << "layout(triangles) in;\n"
            << "layout(triangle_strip, max_vertices = 3) out;\n"
            << "\n"
            << (useSpecConst ? generateSpecConstantCode(m_caseDef.specConstants) : "")
            << (useSpecConst ? generateSSBOCode(m_caseDef.ssboCode) : "")
            << (useSpecConst ? m_caseDef.globalCode + "\n" : "")
            << "void main (void)\n"
            << "{\n"
            << (useSpecConst ? m_caseDef.mainCode + "\n" : "")
            << "    gl_Position = gl_in[0].gl_Position;\n"
            << "    EmitVertex();\n"
            << "\n"
            << "    gl_Position = gl_in[1].gl_Position;\n"
            << "    EmitVertex();\n"
            << "\n"
            << "    gl_Position = gl_in[2].gl_Position;\n"
            << "    EmitVertex();\n"
            << "\n"
            << "    EndPrimitive();\n"
            << "}\n";

        programCollection.glslSources.add("geom") << glu::GeometrySource(src.str());
    }

    if (requiredStages & VK_SHADER_STAGE_COMPUTE_BIT)
    {
        std::ostringstream src;
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
            // Don't define work group size, use the default or specialization constants
            << "\n"
            << generateSpecConstantCode(m_caseDef.specConstants)
            << generateSSBOCode(m_caseDef.ssboCode)
            << m_caseDef.globalCode + "\n"
            << "void main (void)\n"
            << "{\n"
            << m_caseDef.mainCode
            << "}\n";

        programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
    }
}
+
//! Runs the compute variant: dispatches one workgroup and verifies SSBO contents.
class ComputeTestInstance : public TestInstance
{
public:
    ComputeTestInstance (Context&                         context,
                         const VkDeviceSize               ssboSize,
                         const std::vector<SpecConstant>& specConstants,
                         const std::vector<OffsetValue>&  expectedValues);

    tcu::TestStatus iterate (void);

private:
    const VkDeviceSize              m_ssboSize;       //!< size of the result buffer in bytes
    const std::vector<SpecConstant> m_specConstants;  //!< constants to pass via VkSpecializationInfo
    const std::vector<OffsetValue>  m_expectedValues; //!< expected SSBO contents
};

ComputeTestInstance::ComputeTestInstance (Context&                         context,
                                          const VkDeviceSize               ssboSize,
                                          const std::vector<SpecConstant>& specConstants,
                                          const std::vector<OffsetValue>&  expectedValues)
    : TestInstance     (context)
    , m_ssboSize       (ssboSize)
    , m_specConstants  (specConstants)
    , m_expectedValues (expectedValues)
{
}
+
//! Build a compute pipeline with the case's specialization info, dispatch a single
//! workgroup, and compare the host-visible result buffer against expected values.
tcu::TestStatus ComputeTestInstance::iterate (void)
{
    const DeviceInterface& vk               = m_context.getDeviceInterface();
    const VkDevice         device           = m_context.getDevice();
    const VkQueue          queue            = m_context.getUniversalQueue();
    const deUint32         queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    Allocator&             allocator        = m_context.getDefaultAllocator();

    // Descriptors

    // Host-visible SSBO the shader writes its results into.
    const Buffer resultBuffer(vk, device, allocator, makeBufferCreateInfo(m_ssboSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);

    const Unique<VkDescriptorSetLayout> descriptorSetLayout(DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
        .build(vk, device));

    const Unique<VkDescriptorPool> descriptorPool(DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

    const Unique<VkDescriptorSet> descriptorSet        (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
    const VkDescriptorBufferInfo  descriptorBufferInfo = makeDescriptorBufferInfo(resultBuffer.get(), 0ull, m_ssboSize);

    DescriptorSetUpdateBuilder()
        .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorBufferInfo)
        .update(vk, device);

    // Specialization

    // 'specialization' must outlive the pipeline creation below (pSpecInfo points into it).
    const Specialization        specialization (m_specConstants);
    const VkSpecializationInfo* pSpecInfo      = specialization.getSpecializationInfo();

    // Pipeline

    const Unique<VkShaderModule>   shaderModule  (createShaderModule (vk, device, m_context.getBinaryCollection().get("comp"), 0));
    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout (vk, device, *descriptorSetLayout));
    const Unique<VkPipeline>       pipeline      (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule, pSpecInfo));
    const Unique<VkCommandPool>    cmdPool       (makeCommandPool    (vk, device, queueFamilyIndex));
    const Unique<VkCommandBuffer>  cmdBuffer     (makeCommandBuffer  (vk, device, *cmdPool));

    beginCommandBuffer(vk, *cmdBuffer);

    vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
    vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

    // Single workgroup is enough; the workgroup size itself may be specialized.
    vk.cmdDispatch(*cmdBuffer, 1u, 1u, 1u);

    {
        // Make shader writes visible to subsequent host reads.
        const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
            VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, m_ssboSize);

        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
            0u, DE_NULL, 1u, &shaderWriteBarrier, 0u, DE_NULL);
    }

    endCommandBuffer(vk, *cmdBuffer);
    submitCommandsAndWait(vk, device, queue, *cmdBuffer);

    // Verify results

    const Allocation& resultAlloc = resultBuffer.getAllocation();
    invalidateMappedMemoryRange(vk, device, resultAlloc.getMemory(), resultAlloc.getOffset(), m_ssboSize);

    if (verifyValues(m_context.getTestContext().getLog(), resultAlloc.getHostPtr(), m_expectedValues))
        return tcu::TestStatus::pass("Success");
    else
        return tcu::TestStatus::fail("Values did not match");
}
+
//! Runs the graphics variant: draws one triangle and verifies SSBO contents written
//! by the tested shader stage.
class GraphicsTestInstance : public TestInstance
{
public:
    GraphicsTestInstance (Context&                         context,
                          const VkDeviceSize               ssboSize,
                          const std::vector<SpecConstant>& specConstants,
                          const std::vector<OffsetValue>&  expectedValues,
                          const VkShaderStageFlagBits      stage);

    tcu::TestStatus iterate (void);

private:
    const VkDeviceSize              m_ssboSize;       //!< size of the result buffer in bytes
    const std::vector<SpecConstant> m_specConstants;  //!< constants to pass via VkSpecializationInfo
    const std::vector<OffsetValue>  m_expectedValues; //!< expected SSBO contents
    const VkShaderStageFlagBits     m_stage;          //!< stage under test (selects optional pipeline stages)
};

GraphicsTestInstance::GraphicsTestInstance (Context&                         context,
                                            const VkDeviceSize               ssboSize,
                                            const std::vector<SpecConstant>& specConstants,
                                            const std::vector<OffsetValue>&  expectedValues,
                                            const VkShaderStageFlagBits      stage)
    : TestInstance     (context)
    , m_ssboSize       (ssboSize)
    , m_specConstants  (specConstants)
    , m_expectedValues (expectedValues)
    , m_stage          (stage)
{
}
+
//! Build a graphics pipeline with the case's specialization info (adding tessellation
//! or geometry stages when they are under test), draw one triangle, and compare the
//! host-visible result buffer against expected values.
tcu::TestStatus GraphicsTestInstance::iterate (void)
{
    const DeviceInterface& vk               = m_context.getDeviceInterface();
    const VkDevice         device           = m_context.getDevice();
    const VkQueue          queue            = m_context.getUniversalQueue();
    const deUint32         queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    Allocator&             allocator        = m_context.getDefaultAllocator();

    // Color attachment

    const tcu::IVec2          renderSize     = tcu::IVec2(32, 32);
    const VkFormat            imageFormat    = VK_FORMAT_R8G8B8A8_UNORM;
    const Image               colorImage     (vk, device, allocator, makeImageCreateInfo(renderSize, imageFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT), MemoryRequirement::Any);
    const Unique<VkImageView> colorImageView (makeImageView(vk, device, *colorImage, VK_IMAGE_VIEW_TYPE_2D, imageFormat));

    // Vertex buffer

    const deUint32     numVertices           = 3;
    const VkDeviceSize vertexBufferSizeBytes = sizeof(tcu::Vec4) * numVertices;
    const Buffer       vertexBuffer          (vk, device, allocator, makeBufferCreateInfo(vertexBufferSizeBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), MemoryRequirement::HostVisible);

    {
        // One triangle covering the lower-left half of the render area.
        const Allocation& alloc     = vertexBuffer.getAllocation();
        tcu::Vec4*        pVertices = reinterpret_cast<tcu::Vec4*>(alloc.getHostPtr());

        pVertices[0] = tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f);
        pVertices[1] = tcu::Vec4(-1.0f,  1.0f, 0.0f, 1.0f);
        pVertices[2] = tcu::Vec4( 1.0f, -1.0f, 0.0f, 1.0f);

        flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), vertexBufferSizeBytes);
        // No barrier needed, flushed memory is automatically visible
    }

    // Descriptors

    // Host-visible SSBO the tested shader stage writes its results into.
    const Buffer resultBuffer(vk, device, allocator, makeBufferCreateInfo(m_ssboSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);

    const Unique<VkDescriptorSetLayout> descriptorSetLayout(DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL_GRAPHICS)
        .build(vk, device));

    const Unique<VkDescriptorPool> descriptorPool(DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

    const Unique<VkDescriptorSet> descriptorSet        (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
    const VkDescriptorBufferInfo  descriptorBufferInfo = makeDescriptorBufferInfo(resultBuffer.get(), 0ull, m_ssboSize);

    DescriptorSetUpdateBuilder()
        .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorBufferInfo)
        .update(vk, device);

    // Specialization

    // 'specialization' must outlive pipeline creation (pSpecInfo points into it);
    // the same info is passed to every stage.
    const Specialization        specialization (m_specConstants);
    const VkSpecializationInfo* pSpecInfo      = specialization.getSpecializationInfo();

    // Pipeline

    const Unique<VkRenderPass>     renderPass    (makeRenderPass    (vk, device, imageFormat));
    const Unique<VkFramebuffer>    framebuffer   (makeFramebuffer   (vk, device, *renderPass, *colorImageView, renderSize.x(), renderSize.y()));
    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
    const Unique<VkCommandPool>    cmdPool       (makeCommandPool   (vk, device, queueFamilyIndex));
    const Unique<VkCommandBuffer>  cmdBuffer     (makeCommandBuffer (vk, device, *cmdPool));

    GraphicsPipelineBuilder pipelineBuilder;
    pipelineBuilder
        .setRenderSize(renderSize)
        .setShader(vk, device, VK_SHADER_STAGE_VERTEX_BIT,   m_context.getBinaryCollection().get("vert"), pSpecInfo)
        .setShader(vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, m_context.getBinaryCollection().get("frag"), pSpecInfo);

    // Tessellation and geometry stages are added only when one of them is under test.
    if ((m_stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) || (m_stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
        pipelineBuilder
            .setShader(vk, device, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,    m_context.getBinaryCollection().get("tesc"), pSpecInfo)
            .setShader(vk, device, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, m_context.getBinaryCollection().get("tese"), pSpecInfo);

    if (m_stage == VK_SHADER_STAGE_GEOMETRY_BIT)
        pipelineBuilder
            .setShader(vk, device, VK_SHADER_STAGE_GEOMETRY_BIT, m_context.getBinaryCollection().get("geom"), pSpecInfo);

    const Unique<VkPipeline> pipeline (pipelineBuilder.build(vk, device, *pipelineLayout, *renderPass));

    // Draw commands

    const VkRect2D renderArea = {
        makeOffset2D(0, 0),
        makeExtent2D(renderSize.x(), renderSize.y()),
    };
    const tcu::Vec4    clearColor         (0.0f, 0.0f, 0.0f, 1.0f);
    const VkDeviceSize vertexBufferOffset = 0ull;

    beginCommandBuffer(vk, *cmdBuffer);

    {
        // Transition the freshly created color image to the attachment layout.
        const VkImageSubresourceRange imageFullSubresourceRange              = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
        const VkImageMemoryBarrier    barrierColorAttachmentSetInitialLayout = makeImageMemoryBarrier(
            0u, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
            *colorImage, imageFullSubresourceRange);

        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0u,
            0u, DE_NULL, 0u, DE_NULL, 1u, &barrierColorAttachmentSetInitialLayout);
    }

    beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, renderArea, clearColor);

    vk.cmdBindPipeline      (*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
    vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
    vk.cmdBindVertexBuffers (*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);

    vk.cmdDraw(*cmdBuffer, numVertices, 1u, 0u, 0u);
    endRenderPass(vk, *cmdBuffer);

    {
        // Make shader writes visible to subsequent host reads.
        const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
            VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, m_ssboSize);

        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
            0u, DE_NULL, 1u, &shaderWriteBarrier, 0u, DE_NULL);
    }

    endCommandBuffer(vk, *cmdBuffer);
    submitCommandsAndWait(vk, device, queue, *cmdBuffer);

    // Verify results

    const Allocation& resultAlloc = resultBuffer.getAllocation();
    invalidateMappedMemoryRange(vk, device, resultAlloc.getMemory(), resultAlloc.getOffset(), m_ssboSize);

    if (verifyValues(m_context.getTestContext().getLog(), resultAlloc.getHostPtr(), m_expectedValues))
        return tcu::TestStatus::pass("Success");
    else
        return tcu::TestStatus::fail("Values did not match");
}
+
+//! Translate a set of shader stages into the device features those stages need
+//! in these tests. Returned flags are OR'ed with the case-specific requirements
+//! before the instance is created (see SpecConstantTest::createInstance).
+FeatureFlags getShaderStageRequirements (const VkShaderStageFlags stageFlags)
+{
+	FeatureFlags features = (FeatureFlags)0;
+
+	// Either tessellation stage implies the tessellationShader device feature.
+	if (((stageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0) || ((stageFlags & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0))
+		features |= FEATURE_TESSELLATION_SHADER;
+
+	if ((stageFlags & VK_SHADER_STAGE_GEOMETRY_BIT) != 0)
+		features |= FEATURE_GEOMETRY_SHADER;
+
+	// All tests use SSBO writes to read back results.
+	// Fragment-stage stores need fragmentStoresAndAtomics; stores from any other
+	// graphics stage need vertexPipelineStoresAndAtomics instead. Note the else
+	// binds to the inner if, as the indentation suggests.
+	if ((stageFlags & VK_SHADER_STAGE_ALL_GRAPHICS) != 0)
+		if ((stageFlags & VK_SHADER_STAGE_FRAGMENT_BIT) != 0)
+			features |= FEATURE_FRAGMENT_STORES_AND_ATOMICS;
+		else
+			features |= FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS;
+
+	return features;
+}
+
+//! Instantiate the compute or graphics flavor of the test, after verifying that
+//! both the case-specific and the stage-derived device features are available.
+TestInstance* SpecConstantTest::createInstance (Context& context) const
+{
+	// requireFeatures is expected to abort the test if a feature is missing
+	// (defined in the spec constant utility code -- confirm exact behavior there).
+	requireFeatures(context.getInstanceInterface(), context.getPhysicalDevice(), m_caseDef.requirements | getShaderStageRequirements(m_stage));
+
+	if (m_stage & VK_SHADER_STAGE_COMPUTE_BIT)
+		return new ComputeTestInstance(context, m_caseDef.ssboSize, m_caseDef.specConstants, m_caseDef.expectedValues);
+	else
+		// Graphics instances additionally need the stage in which the SSBO write happens.
+		return new GraphicsTestInstance(context, m_caseDef.ssboSize, m_caseDef.specConstants, m_caseDef.expectedValues, m_stage);
+}
+
+//! Declare specialization constants but use them with default values.
+tcu::TestCaseGroup* createDefaultValueTests (tcu::TestContext& testCtx, const VkShaderStageFlagBits shaderStage)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, "default_value", "use default constant value"));
+
+	// The aggregate initializers below appear to follow the CaseDefinition field
+	// order: case name, spec constant declarations, SSBO size in bytes, SSBO
+	// member declarations, global declaration code, main body code, expected
+	// values, required features (struct declared earlier in this file -- confirm
+	// the order against its declaration).
+	// No SpecConstant entry here carries API-provided data, so the shader-declared
+	// defaults are what gets read back.
+	static const CaseDefinition defs[] =
+	{
+		{
+			"bool",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const bool sc0 = true;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const bool sc1 = false;")),
+			8,
+			"    bool r0;\n"
+			"    bool r1;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n",
+			makeVector(OffsetValue(4, 0, makeValueBool32(true)),
+					   OffsetValue(4, 4, makeValueBool32(false))),
+			(FeatureFlags)0,
+		},
+		{
+			"int",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = -3;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 17;")),
+			8,
+			"    int r0;\n"
+			"    int r1;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(-3)),
+					   OffsetValue(4, 4, makeValueInt32(17))),
+			(FeatureFlags)0,
+		},
+		{
+			"uint",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const uint sc0 = 42u;")),
+			4,
+			"    uint r0;\n",
+			"",
+			"    sb_out.r0 = sc0;\n",
+			makeVector(OffsetValue(4, 0, makeValueUint32(42u))),
+			(FeatureFlags)0,
+		},
+		{
+			"float",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const float sc0 = 7.5;")),
+			4,
+			"    float r0;\n",
+			"",
+			"    sb_out.r0 = sc0;\n",
+			makeVector(OffsetValue(4, 0, makeValueFloat32(7.5f))),
+			(FeatureFlags)0,
+		},
+		{
+			// Doubles need the shaderFloat64 device feature.
+			"double",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const double sc0 = 2.75LF;")),
+			8,
+			"    double r0;\n",
+			"",
+			"    sb_out.r0 = sc0;\n",
+			makeVector(OffsetValue(8, 0, makeValueFloat64(2.75))),
+			FEATURE_SHADER_FLOAT_64,
+		},
+	};
+
+	for (int defNdx = 0; defNdx < DE_LENGTH_OF_ARRAY(defs); ++defNdx)
+		testGroup->addChild(new SpecConstantTest(testCtx, shaderStage, defs[defNdx]));
+
+	return testGroup.release();
+}
+
+//! Declare specialization constants and specify their values through API.
+tcu::TestCaseGroup* createBasicSpecializationTests (tcu::TestContext& testCtx, const VkShaderStageFlagBits shaderStage)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, "basic", "specialize a constant"));
+
+	// SpecConstant entries that carry a (size, value) pair are specialized
+	// through the API and must override the shader-declared default; entries
+	// without data keep their default (e.g. sc1 = 91 in the "int" case below).
+	static const CaseDefinition defs[] =
+	{
+		{
+			"bool",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const bool sc0 = true;",  4, makeValueBool32(true)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const bool sc1 = false;", 4, makeValueBool32(false)),
+					   SpecConstant(3u, "layout(constant_id = ${ID}) const bool sc2 = true;",  4, makeValueBool32(false)),
+					   SpecConstant(4u, "layout(constant_id = ${ID}) const bool sc3 = false;", 4, makeValueBool32(true))),
+			16,
+			"    bool r0;\n"
+			"    bool r1;\n"
+			"    bool r2;\n"
+			"    bool r3;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n"
+			"    sb_out.r2 = sc2;\n"
+			"    sb_out.r3 = sc3;\n",
+			makeVector(OffsetValue(4,  0, makeValueBool32(true)),
+					   OffsetValue(4,  4, makeValueBool32(false)),
+					   OffsetValue(4,  8, makeValueBool32(false)),
+					   OffsetValue(4, 12, makeValueBool32(true))),
+			(FeatureFlags)0,
+		},
+		{
+			"int",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = -3;", 4, makeValueInt32(33)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 91;"),
+					   SpecConstant(3u, "layout(constant_id = ${ID}) const int sc2 = 17;", 4, makeValueInt32(-15))),
+			12,
+			"    int r0;\n"
+			"    int r1;\n"
+			"    int r2;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n"
+			"    sb_out.r2 = sc2;\n",
+			makeVector(OffsetValue(4,  0, makeValueInt32(33)),
+					   OffsetValue(4,  4, makeValueInt32(91)),
+					   OffsetValue(4,  8, makeValueInt32(-15))),
+			(FeatureFlags)0,
+		},
+		{
+			"uint",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const uint sc0 = 42u;", 4, makeValueUint32(97u)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const uint sc1 = 7u;")),
+			8,
+			"    uint r0;\n"
+			"    uint r1;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n",
+			makeVector(OffsetValue(4, 0, makeValueUint32(97u)),
+					   OffsetValue(4, 4, makeValueUint32(7u))),
+			(FeatureFlags)0,
+		},
+		{
+			"float",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const float sc0 = 7.5;", 4, makeValueFloat32(15.75f)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const float sc1 = 1.125;")),
+			8,
+			"    float r0;\n"
+			"    float r1;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n",
+			makeVector(OffsetValue(4, 0, makeValueFloat32(15.75f)),
+					   OffsetValue(4, 4, makeValueFloat32(1.125f))),
+			(FeatureFlags)0,
+		},
+		{
+			// Doubles use 8-byte specialization data and need shaderFloat64.
+			"double",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const double sc0 = 2.75LF;", 8, makeValueFloat64(22.5)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const double sc1 = 9.25LF;")),
+			16,
+			"    double r0;\n"
+			"    double r1;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n",
+			makeVector(OffsetValue(8, 0, makeValueFloat64(22.5)),
+					   OffsetValue(8, 8, makeValueFloat64(9.25))),
+			FEATURE_SHADER_FLOAT_64,
+		},
+	};
+
+	for (int defNdx = 0; defNdx < DE_LENGTH_OF_ARRAY(defs); ++defNdx)
+		testGroup->addChild(new SpecConstantTest(testCtx, shaderStage, defs[defNdx]));
+
+	return testGroup.release();
+}
+
+//! Test various input data sizes, e.g. short -> int.
+tcu::TestCaseGroup* createDataSizeTests (tcu::TestContext& testCtx, const VkShaderStageFlagBits shaderStage)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, "data_size", "different input data sizes"));
+
+	// Each SpecConstant supplies a 32-bit value but declares a specialization
+	// data size of 1, 2 or 4 bytes. The expected values show that only the
+	// low-order 1/2/4 bytes of the supplied value must be consumed
+	// (e.g. size 1 with 0xffffff0a is expected to yield 0x0a).
+	static const CaseDefinition defs[] =
+	{
+		{
+			"bool",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const bool sc0 = false;", 1, makeValueBool32(true)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const bool sc1 = false;", 2, makeValueBool32(true)),
+					   SpecConstant(3u, "layout(constant_id = ${ID}) const bool sc2 = false;", 4, makeValueBool32(true))),
+			12,
+			"    bool r0;\n"
+			"    bool r1;\n"
+			"    bool r2;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n"
+			"    sb_out.r2 = sc2;\n",
+			makeVector(OffsetValue(4,  0, makeValueBool32(true)),
+					   OffsetValue(4,  4, makeValueBool32(true)),
+					   OffsetValue(4,  8, makeValueBool32(true))),
+			(FeatureFlags)0,
+		},
+		{
+			"int",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 0;", 1, makeValueInt32(0xffffff0a)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 0;", 2, makeValueInt32(0xffff0a0b)),
+					   SpecConstant(3u, "layout(constant_id = ${ID}) const int sc2 = 0;", 4, makeValueInt32(0x0a0b0c0d))),
+			12,
+			"    int r0;\n"
+			"    int r1;\n"
+			"    int r2;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n"
+			"    sb_out.r2 = sc2;\n",
+			makeVector(OffsetValue(4,  0, makeValueInt32(0x0a)),
+					   OffsetValue(4,  4, makeValueInt32(0x0a0b)),
+					   OffsetValue(4,  8, makeValueInt32(0x0a0b0c0d))),
+			(FeatureFlags)0,
+		},
+		{
+			"uint",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const uint sc0 = 0u;", 1, makeValueUint32(0xffffffa0)),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const uint sc1 = 0u;", 2, makeValueUint32(0xffffa0b0)),
+					   SpecConstant(3u, "layout(constant_id = ${ID}) const uint sc2 = 0u;", 4, makeValueUint32(0xa0b0c0d0))),
+			12,
+			"    uint r0;\n"
+			"    uint r1;\n"
+			"    uint r2;\n",
+			"",
+			"    sb_out.r0 = sc0;\n"
+			"    sb_out.r1 = sc1;\n"
+			"    sb_out.r2 = sc2;\n",
+			makeVector(OffsetValue(4,  0, makeValueUint32(0xa0)),
+					   OffsetValue(4,  4, makeValueUint32(0xa0b0)),
+					   OffsetValue(4,  8, makeValueUint32(0xa0b0c0d0))),
+			(FeatureFlags)0,
+		},
+	};
+
+	for (int defNdx = 0; defNdx < DE_LENGTH_OF_ARRAY(defs); ++defNdx)
+		testGroup->addChild(new SpecConstantTest(testCtx, shaderStage, defs[defNdx]));
+
+	return testGroup.release();
+}
+
+//! Specify compute shader work group size through specialization constants.
+tcu::TestCaseGroup* createWorkGroupSizeTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, "local_size", "work group size specialization"));
+
+	// Shared shader skeleton: every invocation atomically increments a shared
+	// counter between two barrier pairs, then each invocation writes out
+	// gl_WorkGroupSize and the final count. With a single work group the count
+	// must equal the product of the specialized local sizes -- this is the
+	// "checksum" verified at SSBO offset 12 below. Unspecialized dimensions
+	// keep the default local size of 1.
+	const deUint32 ssboSize = 16;
+	const std::string ssboDecl =
+		"    uvec3 workGroupSize;\n"
+		"    uint  checksum;\n";
+	const std::string globalDecl = "shared uint count;\n";
+	const std::string mainCode =
+		"    count = 0u;\n"
+		"\n"
+		"    groupMemoryBarrier();\n"
+		"    barrier();\n"
+		"\n"
+		"    atomicAdd(count, 1u);\n"
+		"\n"
+		"    groupMemoryBarrier();\n"
+		"    barrier();\n"
+		"\n"
+		"    sb_out.workGroupSize = gl_WorkGroupSize;\n"
+		"    sb_out.checksum      = count;\n";
+
+	static const CaseDefinition defs[] =
+	{
+		{
+			"x",
+			makeVector(SpecConstant(1u, "layout(local_size_x_id = ${ID}) in;", 4, makeValueUint32(7u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(7u)),
+					   OffsetValue(4,  4, makeValueUint32(1u)),
+					   OffsetValue(4,  8, makeValueUint32(1u)),
+					   OffsetValue(4, 12, makeValueUint32(7u))),
+			(FeatureFlags)0,
+		},
+		{
+			"y",
+			makeVector(SpecConstant(1u, "layout(local_size_y_id = ${ID}) in;", 4, makeValueUint32(5u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(1u)),
+					   OffsetValue(4,  4, makeValueUint32(5u)),
+					   OffsetValue(4,  8, makeValueUint32(1u)),
+					   OffsetValue(4, 12, makeValueUint32(5u))),
+			(FeatureFlags)0,
+		},
+		{
+			"z",
+			makeVector(SpecConstant(1u, "layout(local_size_z_id = ${ID}) in;", 4, makeValueUint32(3u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(1u)),
+					   OffsetValue(4,  4, makeValueUint32(1u)),
+					   OffsetValue(4,  8, makeValueUint32(3u)),
+					   OffsetValue(4, 12, makeValueUint32(3u))),
+			(FeatureFlags)0,
+		},
+		{
+			"xy",
+			makeVector(SpecConstant(1u, "layout(local_size_x_id = ${ID}) in;", 4, makeValueUint32(6u)),
+					   SpecConstant(2u, "layout(local_size_y_id = ${ID}) in;", 4, makeValueUint32(4u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(6u)),
+					   OffsetValue(4,  4, makeValueUint32(4u)),
+					   OffsetValue(4,  8, makeValueUint32(1u)),
+					   OffsetValue(4, 12, makeValueUint32(6u * 4u))),
+			(FeatureFlags)0,
+		},
+		{
+			"xz",
+			makeVector(SpecConstant(1u, "layout(local_size_x_id = ${ID}) in;", 4, makeValueUint32(3u)),
+					   SpecConstant(2u, "layout(local_size_z_id = ${ID}) in;", 4, makeValueUint32(9u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(3u)),
+					   OffsetValue(4,  4, makeValueUint32(1u)),
+					   OffsetValue(4,  8, makeValueUint32(9u)),
+					   OffsetValue(4, 12, makeValueUint32(3u * 9u))),
+			(FeatureFlags)0,
+		},
+		{
+			"yz",
+			makeVector(SpecConstant(1u, "layout(local_size_y_id = ${ID}) in;", 4, makeValueUint32(2u)),
+					   SpecConstant(2u, "layout(local_size_z_id = ${ID}) in;", 4, makeValueUint32(5u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(1u)),
+					   OffsetValue(4,  4, makeValueUint32(2u)),
+					   OffsetValue(4,  8, makeValueUint32(5u)),
+					   OffsetValue(4, 12, makeValueUint32(2u * 5u))),
+			(FeatureFlags)0,
+		},
+		{
+			"xyz",
+			makeVector(SpecConstant(1u, "layout(local_size_x_id = ${ID}) in;", 4, makeValueUint32(3u)),
+					   SpecConstant(2u, "layout(local_size_y_id = ${ID}) in;", 4, makeValueUint32(5u)),
+					   SpecConstant(3u, "layout(local_size_z_id = ${ID}) in;", 4, makeValueUint32(7u))),
+			ssboSize, ssboDecl, globalDecl, mainCode,
+			makeVector(OffsetValue(4,  0, makeValueUint32(3u)),
+					   OffsetValue(4,  4, makeValueUint32(5u)),
+					   OffsetValue(4,  8, makeValueUint32(7u)),
+					   OffsetValue(4, 12, makeValueUint32(3u * 5u * 7u))),
+			(FeatureFlags)0,
+		},
+	};
+
+	// Work group size specialization only applies to compute shaders.
+	for (int defNdx = 0; defNdx < DE_LENGTH_OF_ARRAY(defs); ++defNdx)
+		testGroup->addChild(new SpecConstantTest(testCtx, VK_SHADER_STAGE_COMPUTE_BIT, defs[defNdx]));
+
+	return testGroup.release();
+}
+
+//! Override a built-in variable with specialization constant value.
+tcu::TestCaseGroup* createBuiltInOverrideTests (tcu::TestContext& testCtx, const VkShaderStageFlagBits shaderStage)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, "builtin", "built-in override"));
+
+	static const CaseDefinition defs[] =
+	{
+		{
+			// Redeclare gl_MaxImageUnits with a constant_id but supply no data:
+			// the implementation-defined default must still be in effect.
+			"default",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) gl_MaxImageUnits;")),
+			4,
+			"    bool ok;\n",
+			"",
+			"    sb_out.ok = (gl_MaxImageUnits >= 8);\n",	// implementation defined, 8 is the minimum
+			makeVector(OffsetValue(4, 0, makeValueBool32(true))),
+			(FeatureFlags)0,
+		},
+		{
+			// Specialize the built-in to 12 and read it back verbatim.
+			"specialized",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) gl_MaxImageUnits;", 4, makeValueInt32(12))),
+			4,
+			"    int maxImageUnits;\n",
+			"",
+			"    sb_out.maxImageUnits = gl_MaxImageUnits;\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(12))),
+			(FeatureFlags)0,
+		},
+	};
+
+	for (int defNdx = 0; defNdx < DE_LENGTH_OF_ARRAY(defs); ++defNdx)
+		testGroup->addChild(new SpecConstantTest(testCtx, shaderStage, defs[defNdx]));
+
+	return testGroup.release();
+}
+
+//! Specialization constants used in expressions.
+tcu::TestCaseGroup* createExpressionTests (tcu::TestContext& testCtx, const VkShaderStageFlagBits shaderStage)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup (new tcu::TestCaseGroup(testCtx, "expression", "specialization constants usage in expressions"));
+
+	static const CaseDefinition defs[] =
+	{
+		{
+			// sc0 keeps default 2, sc1 specialized to 5:
+			// expr0 = 2 + 1 = 3, expr1 = 2 + 5 = 7, result = 10.
+			"spec_const_expression",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 2;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 3;", 4, makeValueInt32(5))),
+			4,
+			"    int result;\n",
+
+			"const int expr0 = sc0 + 1;\n"
+			"const int expr1 = sc0 + sc1;\n",
+
+			"    sb_out.result = expr0 + expr1;\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(10))),
+			(FeatureFlags)0,
+		},
+		{
+			// Arrays sized directly by spec constants; sc1 specialized to 3,
+			// so a1 is {3, 2, 1} and a0 is {1}.
+			"array_size",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 1;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 2;", 4, makeValueInt32(3))),
+			16,
+			"    int r0;\n"
+			"    int r1[3];\n",
+
+			"",
+
+			"    int a0[sc0];\n"
+			"    int a1[sc1];\n"
+			"\n"
+			"    for (int i = 0; i < sc0; ++i)\n"
+			"        a0[i] = sc0 - i;\n"
+			"    for (int i = 0; i < sc1; ++i)\n"
+			"        a1[i] = sc1 - i;\n"
+			"\n"
+			"    sb_out.r0 = a0[0];\n"
+			"    for (int i = 0; i < sc1; ++i)\n"
+			"        sb_out.r1[i] = a1[i];\n",
+			makeVector(OffsetValue(4,  0, makeValueInt32(1)),
+					   OffsetValue(4,  4, makeValueInt32(3)),
+					   OffsetValue(4,  8, makeValueInt32(2)),
+					   OffsetValue(4, 12, makeValueInt32(1))),
+			(FeatureFlags)0,
+		},
+		{
+			// Array sizes are constant expressions over spec constants.
+			// sc0 = 3 (default), sc1 = 7 (specialized): size0 = 6, size1 = 10,
+			// so a0[5] = 3 - 5 = -2 and a1[9] = 5 - 9 = -4.
+			"array_size_expression",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 3;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 5;", 4, makeValueInt32(7))),
+			8,
+			"    int r0;\n"
+			"    int r1;\n",
+
+			"",
+
+			"    int a0[sc0 + 3];\n"
+			"    int a1[sc0 + sc1];\n"
+			"\n"
+			"    const int size0 = sc0 + 3;\n"
+			"    const int size1 = sc0 + sc1;\n"
+			"\n"
+			"    for (int i = 0; i < size0; ++i)\n"
+			"        a0[i] = 3 - i;\n"
+			"    for (int i = 0; i < size1; ++i)\n"
+			"        a1[i] = 5 - i;\n"
+			"\n"
+			"    sb_out.r0 = a0[size0 - 1];\n"
+			"    sb_out.r1 = a1[size1 - 1];\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(-2)),
+					   OffsetValue(4, 4, makeValueInt32(-4))),
+			(FeatureFlags)0,
+		},
+		{
+			// Same expected values as above, but the sizes are first captured
+			// in named constants which then size the arrays.
+			"array_size_spec_const_expression",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 3;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 5;", 4, makeValueInt32(7))),
+			8,
+			"    int r0;\n"
+			"    int r1;\n",
+
+			"",
+
+			"    const int size0 = sc0 + 3;\n"
+			"    const int size1 = sc0 + sc1;\n"
+			"\n"
+			"    int a0[size0];\n"
+			"    int a1[size1];\n"
+			"\n"
+			"    for (int i = 0; i < size0; ++i)\n"
+			"        a0[i] = 3 - i;\n"
+			"    for (int i = 0; i < size1; ++i)\n"
+			"        a1[i] = 5 - i;\n"
+			"\n"
+			"    sb_out.r0 = a0[size0 - 1];\n"
+			"    sb_out.r1 = a1[size1 - 1];\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(-2)),
+					   OffsetValue(4, 4, makeValueInt32(-4))),
+			(FeatureFlags)0,
+		},
+		{
+			// .length() of a spec-constant-sized array must reflect the
+			// specialized size (sc1 -> 4), not the default.
+			"array_size_length",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 1;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 2;", 4, makeValueInt32(4))),
+			8,
+			"    int r0;\n"
+			"    int r1;\n",
+
+			"",
+
+			"    int a0[sc0];\n"
+			"    int a1[sc1];\n"
+			"\n"
+			"    sb_out.r0 = a0.length();\n"
+			"    sb_out.r1 = a1.length();\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(1)),
+					   OffsetValue(4, 4, makeValueInt32(4))),
+			(FeatureFlags)0,
+		},
+		{
+			// Spec-constant-sized arrays as function parameters. With both
+			// sizes 3: sum = (1+2) + (2+3) + (3+4) = 15.
+			"array_size_pass_to_function",
+			makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 3;"),
+					   SpecConstant(2u, "layout(constant_id = ${ID}) const int sc1 = 1;", 4, makeValueInt32(3))),
+			4,
+			"    int result;\n",
+
+			"int sumArrays (int a0[sc0], int a1[sc1])\n"
+			"{\n"
+			"    int sum = 0;\n"
+			"    for (int i = 0; (i < sc0) && (i < sc1); ++i)\n"
+			"        sum += a0[i] + a1[i];\n"
+			"    return sum;\n"
+			"}\n",
+
+			"    int a0[sc0];\n"
+			"    int a1[sc1];\n"
+			"\n"
+			"    for (int i = 0; i < sc0; ++i)\n"
+			"        a0[i] = i + 1;\n"
+			"    for (int i = 0; i < sc1; ++i)\n"
+			"        a1[i] = i + 2;\n"
+			"\n"
+			"    sb_out.result = sumArrays(a0, a1);\n",
+			makeVector(OffsetValue(4, 0, makeValueInt32(15))),
+			(FeatureFlags)0,
+		},
+	};
+
+	for (int defNdx = 0; defNdx < DE_LENGTH_OF_ARRAY(defs); ++defNdx)
+		testGroup->addChild(new SpecConstantTest(testCtx, shaderStage, defs[defNdx]));
+
+	return testGroup.release();
+}
+
+//! Helper functions internal to make*CompositeCaseDefinition functions.
+namespace composite_case_internal
+{
+
+//! Generate a string like this: "1, 2, sc0, 4" or "true, true, sc0"
+//! castToType = true is useful when type requires more initializer values than we are providing, e.g.:
+//! vec2(1), vec2(sc0), vec(3)
+std::string generateInitializerListWithSpecConstant (const glu::DataType type,
+													 const bool castToType,
+													 const int idxBegin,
+													 const int idxEnd,
+													 const std::string& specConstName,
+													 const int specConstNdx)
+{
+	std::ostringstream str;
+
+	for (int i = idxBegin; i < idxEnd; ++i)
+	{
+		// Element specConstNdx becomes the spec constant name; all other
+		// elements are "true" for bool-based types or the literal (i + 1).
+		const std::string iVal = (i == specConstNdx ? specConstName : glu::getDataTypeScalarType(type) == glu::TYPE_BOOL ? "true" : de::toString(i + 1));
+		// With castToType, each element is wrapped as e.g. "vec2(<val>)".
+		str << (i != idxBegin ? ", " : "") << (castToType ? de::toString(glu::getDataTypeName(type)) + "(" + iVal + ")" : iVal);
+	}
+
+	return str.str();
+}
+
+//! Build the element list for a GLSL array (or array-of-array) constructor,
+//! placing the spec constant at flat index specConstNdx. For size2 > 0 the
+//! result is a comma-separated list of size1 sub-array constructors of size2
+//! elements each; otherwise a flat list of size1 elements.
+std::string generateArrayConstructorString (const glu::DataType elemType,
+											const int size1,
+											const int size2,
+											const std::string& specConstName,
+											const int specConstNdx)
+{
+	const bool isArrayOfArray = (size2 > 0);
+	// Non-scalar elements need an explicit per-element constructor cast.
+	const bool doCast		  = (!isDataTypeScalar(elemType));
+
+	std::ostringstream arrayCtorExpr;
+
+	if (isArrayOfArray)
+	{
+		// Padding only affects source formatting of the generated shader.
+		const std::string padding  (36, ' ');
+		int				  idxBegin = 0;
+		int				  idxEnd   = size2;
+
+		for (int iterNdx = 0; iterNdx < size1; ++iterNdx)
+		{
+			// Open sub-array ctor
+			arrayCtorExpr << (iterNdx != 0 ? ",\n" + padding : "") << glu::getDataTypeName(elemType) << "[" << size2 << "](";
+
+			// Sub-array constructor elements
+			arrayCtorExpr << generateInitializerListWithSpecConstant(elemType, doCast, idxBegin, idxEnd, specConstName, specConstNdx);
+
+			// Close sub-array ctor, move to next range
+			arrayCtorExpr << ")";
+
+			idxBegin += size2;
+			idxEnd   += size2;
+		}
+	}
+	else
+	{
+		// Array constructor elements
+		arrayCtorExpr << generateInitializerListWithSpecConstant(elemType, doCast, 0, size1, specConstName, specConstNdx);
+	}
+
+	return arrayCtorExpr.str();
+}
+
+//! Wrap an integer test value in the GenericValue matching the scalar type.
+//! Bools and uints fall through to the int32 representation.
+inline GenericValue makeValue (const glu::DataType type, const int specValue)
+{
+	if (type == glu::TYPE_DOUBLE)
+		return makeValueFloat64(static_cast<double>(specValue));
+	else if (type == glu::TYPE_FLOAT)
+		return makeValueFloat32(static_cast<float>(specValue));
+	else
+		return makeValueInt32(specValue);
+}
+
+//! Byte size of one scalar element of dataType (bool counts as 4, matching
+//! its 32-bit SSBO representation in these tests); doubles are 8 bytes.
+deUint32 getDataTypeScalarSizeBytes (const glu::DataType dataType)
+{
+	switch (getDataTypeScalarType(dataType))
+	{
+		case glu::TYPE_FLOAT:
+		case glu::TYPE_INT:
+		case glu::TYPE_UINT:
+		case glu::TYPE_BOOL:
+			return 4;
+
+		case glu::TYPE_DOUBLE:
+			return 8;
+
+		default:
+			DE_ASSERT(false);
+			return 0;
+	}
+}
+
+//! This applies to matrices/vectors/array cases. dataType must be a basic type.
+//! This applies to matrices/vectors/array cases. dataType must be a basic type.
+std::vector<OffsetValue> computeExpectedValues (const int specValue, const glu::DataType dataType, const int numCombinations)
+{
+	DE_ASSERT(glu::isDataTypeScalar(dataType));
+
+	std::vector<OffsetValue> expectedValues;
+
+	// Combination combNdx is the composite whose element combNdx was replaced
+	// by the spec constant; its checksum is the sum of all elements, where
+	// non-spec elements are (i + 1) -- or 1 for bools, which sum as "true".
+	for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+	{
+		int sum = 0;
+		for (int i = 0; i < numCombinations; ++i)
+			sum += (i == combNdx ? specValue : dataType == glu::TYPE_BOOL ? 1 : (i + 1));
+
+		// One result per combination, packed contiguously in the SSBO.
+		const int dataSize = getDataTypeScalarSizeBytes(dataType);
+		expectedValues.push_back(OffsetValue(dataSize, dataSize * combNdx, makeValue(dataType, sum)));
+	}
+
+	return expectedValues;
+}
+
+//! Subscript string that selects a single scalar out of a composite element.
+inline std::string getFirstDataElementSubscriptString (const glu::DataType type)
+{
+	// Grab the first element of a matrix/vector, if dealing with non-basic types.
+	return (isDataTypeMatrix(type) ? "[0][0]" : isDataTypeVector(type) ? "[0]" : "");
+}
+
+//! This code will go into the main function.
+//! This code will go into the main function.
+//! Emits one accumulator per combination, a loop (nested for size2 > 0) that
+//! sums the first scalar of every element of each composite varName<N>, and
+//! finally stores each accumulator into sb_out.result[N].
+std::string generateShaderChecksumComputationCode (const glu::DataType elemType,
+												   const std::string& varName,
+												   const std::string& accumType,
+												   const int size1,
+												   const int size2,
+												   const int numCombinations)
+{
+	std::ostringstream mainCode;
+
+	// Generate main code to calculate checksums for each array
+	for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+		mainCode << "    "<< accumType << " sum_" << varName << combNdx << " = " << accumType << "(0);\n";
+
+	if (size2 > 0)
+	{
+		// Two-dimensional iteration (matrix columns/rows or array-of-array).
+		mainCode << "\n"
+				 << "    for (int i = 0; i < " << size1 << "; ++i)\n"
+				 << "    for (int j = 0; j < " << size2 << "; ++j)\n"
+				 << "    {\n";
+
+		for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+			mainCode << "        sum_" << varName << combNdx << " += " << accumType << "("
+					 << varName << combNdx << "[i][j]" << getFirstDataElementSubscriptString(elemType) << ");\n";
+	}
+	else
+	{
+		// One-dimensional iteration (vector components or flat array).
+		mainCode << "\n"
+				 << "    for (int i = 0; i < " << size1 << "; ++i)\n"
+				 << "    {\n";
+
+		for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+			mainCode << "        sum_" << varName << combNdx << " += " << accumType << "("
+					 << varName << combNdx << "[i]" << getFirstDataElementSubscriptString(elemType) << ");\n";
+	}
+
+	// Both branches above opened a "{"; this closes it for either case.
+	mainCode << "    }\n"
+			 << "\n";
+
+	for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+		mainCode << "    sb_out.result[" << combNdx << "] = sum_" << varName << combNdx << ";\n";
+
+	return mainCode.str();
+}
+
+//! Build a SpecConstant for a scalar type: declaration with default <type>(1),
+//! plus API-side specialization data carrying specValue.
+SpecConstant makeSpecConstant (const std::string specConstName, const deUint32 specConstId, const glu::DataType type, const int specValue)
+{
+	DE_ASSERT(glu::isDataTypeScalar(type));
+
+	const std::string typeName(glu::getDataTypeName(type));
+
+	return SpecConstant(
+		specConstId,
+		// ${ID} is substituted with the constant_id by the shader generator.
+		"layout(constant_id = ${ID}) const " + typeName + " " + specConstName + " = " + typeName + "(1);",
+		getDataTypeScalarSizeBytes(type), makeValue(type, specValue));
+}
+
+} // composite_case_internal ns
+
+//! Generate a CaseDefinition for a composite test using a matrix or vector (a 1-column matrix)
+//! Generate a CaseDefinition for a composite test using a matrix or vector (a 1-column matrix)
+CaseDefinition makeMatrixVectorCompositeCaseDefinition (const glu::DataType type)
+{
+	using namespace composite_case_internal;
+
+	DE_ASSERT(!glu::isDataTypeScalar(type));
+
+	const std::string	varName			= (glu::isDataTypeMatrix(type) ? "m" : "v");
+	// One combination per scalar component: composite N has the spec constant
+	// at component N of its constructor.
+	const int			numCombinations	= getDataTypeScalarSize(type);
+	const glu::DataType	scalarType		= glu::getDataTypeScalarType(type);
+	const std::string	typeName		= glu::getDataTypeName(type);
+
+	std::ostringstream globalCode;
+	{
+		// Build N matrices/vectors with specialization constant inserted at various locations in the constructor.
+		for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+			globalCode << "const " << typeName << " " << varName << combNdx << " = " << typeName << "("
+					   << generateInitializerListWithSpecConstant(type, false, 0, numCombinations, "sc0", combNdx) << ");\n";
+	}
+
+	// Bool composites accumulate into ints (spec value 0 = false); others sum
+	// their own scalar type with spec value 42.
+	const bool		isBoolElement	= (scalarType == glu::TYPE_BOOL);
+	const int		specValue		= (isBoolElement ? 0 : 42);
+	const std::string accumType		= glu::getDataTypeName(isBoolElement ? glu::TYPE_INT : scalarType);
+
+	// (size1, size2) drive the checksum loops: (cols, rows) for matrices,
+	// (components, 0) for vectors.
+	const int size1 = glu::isDataTypeMatrix(type) ? glu::getDataTypeMatrixNumColumns(type) : glu::getDataTypeNumComponents(type);
+	const int size2 = glu::isDataTypeMatrix(type) ? glu::getDataTypeMatrixNumRows(type)    : 0;
+
+	const CaseDefinition def =
+	{
+		typeName,
+		makeVector(makeSpecConstant("sc0", 1u, scalarType, specValue)),
+		static_cast<VkDeviceSize>(getDataTypeScalarSizeBytes(type) * numCombinations),
+		"    " + accumType + " result[" + de::toString(numCombinations) + "];\n",
+		globalCode.str(),
+		generateShaderChecksumComputationCode(scalarType, varName, accumType, size1, size2, numCombinations),
+		computeExpectedValues(specValue, scalarType, numCombinations),
+		(scalarType == glu::TYPE_DOUBLE ? FEATURE_SHADER_FLOAT_64 : (FeatureFlags)0),
+	};
+	return def;
+}
+
+//! Generate a CaseDefinition for a composite test using an array, or an array of array.
+//! If (size1, size2) = (N, 0) -> type array[N]
+//! = (N, M) -> type array[N][M]
+CaseDefinition makeArrayCompositeCaseDefinition (const glu::DataType elemType, const int size1, const int size2 = 0)
+{
+	using namespace composite_case_internal;
+
+	DE_ASSERT(size1 > 0);
+
+	const bool			isArrayOfArray	= (size2 > 0);
+	const std::string	varName			= "a";
+	// e.g. "[3]" or "[3][2]" appended to both the variable and the constructor.
+	const std::string	arraySizeDecl	= "[" + de::toString(size1) + "]" + (isArrayOfArray ? "[" + de::toString(size2) + "]" : "");
+	// One combination per flattened element position.
+	const int			numCombinations	= (isArrayOfArray ? size1 * size2 : size1);
+	const std::string	elemTypeName	(glu::getDataTypeName(elemType));
+
+	std::ostringstream globalCode;
+	{
+		// Create several arrays with specialization constant inserted in different positions.
+		for (int combNdx = 0; combNdx < numCombinations; ++combNdx)
+			globalCode << "const " << elemTypeName << " " << varName << combNdx << arraySizeDecl << " = "
+					   << elemTypeName << arraySizeDecl << "(" << generateArrayConstructorString(elemType, size1, size2, "sc0", combNdx) << ");\n";
+	}
+
+	// Bool data sums as ints with spec value 0; everything else uses 19.
+	const glu::DataType	scalarType	= glu::getDataTypeScalarType(elemType);
+	const bool			isBoolData	= (scalarType == glu::TYPE_BOOL);
+	const int			specValue	= (isBoolData ? 0 : 19);
+	// Array-of-array cases get an "array_" prefix to distinguish the case name.
+	const std::string	caseName	= (isArrayOfArray ? "array_" : "") + elemTypeName;
+	const std::string	accumType	= (glu::getDataTypeName(isBoolData ? glu::TYPE_INT : scalarType));
+
+	const CaseDefinition def =
+	{
+		caseName,
+		makeVector(makeSpecConstant("sc0", 1u, scalarType, specValue)),
+		static_cast<VkDeviceSize>(getDataTypeScalarSizeBytes(elemType) * numCombinations),
+		"    " + accumType + " result[" + de::toString(numCombinations) + "];\n",
+		globalCode.str(),
+		generateShaderChecksumComputationCode(elemType, varName, accumType, size1, size2, numCombinations),
+		computeExpectedValues(specValue, scalarType, numCombinations),
+		(scalarType == glu::TYPE_DOUBLE ? FEATURE_SHADER_FLOAT_64 : (FeatureFlags)0),
+	};
+	return def;
+}
+
+//! A basic struct case, where one member is a specialization constant, or a specialization constant composite
+//! (a matrix/vector with a spec. const. element).
+CaseDefinition makeStructCompositeCaseDefinition (const glu::DataType memberType)
+{
+	using namespace composite_case_internal;
+
+	std::ostringstream globalCode;
+	{
+		// Struct with one member of memberType built from spec constant sc0;
+		// the remaining members are fixed literals (3, 2.0, true, 8u).
+		globalCode << "struct Data {\n"
+				   << "    int   i;\n"
+				   << "    float f;\n"
+				   << "    bool  b;\n"
+				   << "    " << glu::getDataTypeName(memberType) << " sc;\n"
+				   << "    uint  ui;\n"
+				   << "};\n"
+				   << "\n"
+				   << "const Data s0 = Data(3, 2.0, true, " << glu::getDataTypeName(memberType) << "(sc0), 8u);\n";
+	}
+
+	const glu::DataType	scalarType	= glu::getDataTypeScalarType(memberType);
+	const bool			isBoolData	= (scalarType == glu::TYPE_BOOL);
+	const int			specValue	= (isBoolData ? 0 : 23);
+	// i + f + b(true=1) + sc + ui; bool member type contributes its spec value 0.
+	const int			checksum	= (3 + 2 + 1 + specValue + 8);	// matches the shader code
+	const glu::DataType	accumType	= (isBoolData ? glu::TYPE_INT : scalarType);
+	const std::string	accumTypeStr = glu::getDataTypeName(accumType);
+
+	std::ostringstream mainCode;
+	{
+		// Sum every struct member into one accumulator; for composite sc only
+		// its first scalar is read (see getFirstDataElementSubscriptString).
+		mainCode << "    " << accumTypeStr << " sum_s0 = " << accumTypeStr << "(0);\n"
+				 << "\n"
+				 << "    sum_s0 += " << accumTypeStr << "(s0.i);\n"
+				 << "    sum_s0 += " << accumTypeStr << "(s0.f);\n"
+				 << "    sum_s0 += " << accumTypeStr << "(s0.b);\n"
+				 << "    sum_s0 += " << accumTypeStr << "(s0.sc" << getFirstDataElementSubscriptString(memberType) << ");\n"
+				 << "    sum_s0 += " << accumTypeStr << "(s0.ui);\n"
+				 << "\n"
+				 << "    sb_out.result = sum_s0;\n";
+	}
+
+	const std::string caseName = glu::getDataTypeName(memberType);
+
+	const CaseDefinition def =
+	{
+		caseName,
+		makeVector(makeSpecConstant("sc0", 1u, scalarType, specValue)),
+		getDataTypeScalarSizeBytes(accumType),
+		"    " + accumTypeStr + " result;\n",
+		globalCode.str(),
+		mainCode.str(),
+		makeVector(OffsetValue(getDataTypeScalarSizeBytes(memberType), 0, makeValue(scalarType, checksum))),
+		(scalarType == glu::TYPE_DOUBLE ? FEATURE_SHADER_FLOAT_64 : (FeatureFlags)0),
+	};
+	return def;
+}
+
+//! Specialization constants used in composites.
+tcu::TestCaseGroup* createCompositeTests (tcu::TestContext& testCtx, const VkShaderStageFlagBits shaderStage)
+{
+ de::MovePtr<tcu::TestCaseGroup> compositeTests (new tcu::TestCaseGroup(testCtx, "composite", "specialization constants usage in composite types"));
+
+ // Vectors
+ {
+ de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "vector", ""));
+
+ const glu::DataType types[] =
+ {
+ glu::TYPE_FLOAT_VEC2,
+ glu::TYPE_FLOAT_VEC3,
+ glu::TYPE_FLOAT_VEC4,
+
+ glu::TYPE_DOUBLE_VEC2,
+ glu::TYPE_DOUBLE_VEC3,
+ glu::TYPE_DOUBLE_VEC4,
+
+ glu::TYPE_BOOL_VEC2,
+ glu::TYPE_BOOL_VEC3,
+ glu::TYPE_BOOL_VEC4,
+
+ glu::TYPE_INT_VEC2,
+ glu::TYPE_INT_VEC3,
+ glu::TYPE_INT_VEC4,
+
+ glu::TYPE_UINT_VEC2,
+ glu::TYPE_UINT_VEC3,
+ glu::TYPE_UINT_VEC4,
+ };
+ for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(types); ++typeNdx)
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, makeMatrixVectorCompositeCaseDefinition(types[typeNdx])));
+
+ compositeTests->addChild(group.release());
+ }
+
+ // Matrices
+ {
+ de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "matrix", ""));
+
+ const glu::DataType types[] =
+ {
+ glu::TYPE_FLOAT_MAT2,
+ glu::TYPE_FLOAT_MAT2X3,
+ glu::TYPE_FLOAT_MAT2X4,
+ glu::TYPE_FLOAT_MAT3X2,
+ glu::TYPE_FLOAT_MAT3,
+ glu::TYPE_FLOAT_MAT3X4,
+ glu::TYPE_FLOAT_MAT4X2,
+ glu::TYPE_FLOAT_MAT4X3,
+ glu::TYPE_FLOAT_MAT4,
+
+ glu::TYPE_DOUBLE_MAT2,
+ glu::TYPE_DOUBLE_MAT2X3,
+ glu::TYPE_DOUBLE_MAT2X4,
+ glu::TYPE_DOUBLE_MAT3X2,
+ glu::TYPE_DOUBLE_MAT3,
+ glu::TYPE_DOUBLE_MAT3X4,
+ glu::TYPE_DOUBLE_MAT4X2,
+ glu::TYPE_DOUBLE_MAT4X3,
+ glu::TYPE_DOUBLE_MAT4,
+ };
+ for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(types); ++typeNdx)
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, makeMatrixVectorCompositeCaseDefinition(types[typeNdx])));
+
+ compositeTests->addChild(group.release());
+ }
+
+ const glu::DataType allTypes[] =
+ {
+ glu::TYPE_FLOAT,
+ glu::TYPE_FLOAT_VEC2,
+ glu::TYPE_FLOAT_VEC3,
+ glu::TYPE_FLOAT_VEC4,
+ glu::TYPE_FLOAT_MAT2,
+ glu::TYPE_FLOAT_MAT2X3,
+ glu::TYPE_FLOAT_MAT2X4,
+ glu::TYPE_FLOAT_MAT3X2,
+ glu::TYPE_FLOAT_MAT3,
+ glu::TYPE_FLOAT_MAT3X4,
+ glu::TYPE_FLOAT_MAT4X2,
+ glu::TYPE_FLOAT_MAT4X3,
+ glu::TYPE_FLOAT_MAT4,
+
+ glu::TYPE_DOUBLE,
+ glu::TYPE_DOUBLE_VEC2,
+ glu::TYPE_DOUBLE_VEC3,
+ glu::TYPE_DOUBLE_VEC4,
+ glu::TYPE_DOUBLE_MAT2,
+ glu::TYPE_DOUBLE_MAT2X3,
+ glu::TYPE_DOUBLE_MAT2X4,
+ glu::TYPE_DOUBLE_MAT3X2,
+ glu::TYPE_DOUBLE_MAT3,
+ glu::TYPE_DOUBLE_MAT3X4,
+ glu::TYPE_DOUBLE_MAT4X2,
+ glu::TYPE_DOUBLE_MAT4X3,
+ glu::TYPE_DOUBLE_MAT4,
+
+ glu::TYPE_INT,
+ glu::TYPE_INT_VEC2,
+ glu::TYPE_INT_VEC3,
+ glu::TYPE_INT_VEC4,
+
+ glu::TYPE_UINT,
+ glu::TYPE_UINT_VEC2,
+ glu::TYPE_UINT_VEC3,
+ glu::TYPE_UINT_VEC4,
+
+ glu::TYPE_BOOL,
+ glu::TYPE_BOOL_VEC2,
+ glu::TYPE_BOOL_VEC3,
+ glu::TYPE_BOOL_VEC4,
+ };
+
+ // Array cases
+ {
+ de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "array", ""));
+
+ // Array of T
+ for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(allTypes); ++typeNdx)
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, makeArrayCompositeCaseDefinition(allTypes[typeNdx], 3)));
+
+ // Array of array of T
+ for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(allTypes); ++typeNdx)
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, makeArrayCompositeCaseDefinition(allTypes[typeNdx], 3, 2)));
+
+ // Special case - array of struct
+ {
+ const int checksum = (3 + 2 + 1) + (1 + 5 + 1) + (1 + 2 + 0);
+ const CaseDefinition def =
+ {
+ "struct",
+ makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 1;", 4, makeValueInt32 (3)),
+ SpecConstant(2u, "layout(constant_id = ${ID}) const float sc1 = 1.0;", 4, makeValueFloat32(5.0f)),
+ SpecConstant(3u, "layout(constant_id = ${ID}) const bool sc2 = true;", 4, makeValueBool32 (false))),
+ 4,
+ " int result;\n",
+
+ "struct Data {\n"
+ " int x;\n"
+ " float y;\n"
+ " bool z;\n"
+ "};\n"
+ "\n"
+ "const Data a0[3] = Data[3](Data(sc0, 2.0, true), Data(1, sc1, true), Data(1, 2.0, sc2));\n",
+
+ " int sum_a0 = 0;\n"
+ "\n"
+ " for (int i = 0; i < 3; ++i)\n"
+ " sum_a0 += int(a0[i].x) + int(a0[i].y) + int(a0[i].z);\n"
+ "\n"
+ " sb_out.result = sum_a0;\n",
+
+ makeVector(OffsetValue(4, 0, makeValueInt32(checksum))),
+ (FeatureFlags)0,
+ };
+
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, def));
+ }
+
+ compositeTests->addChild(group.release());
+ }
+
+ // Struct cases
+ {
+ de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "struct", ""));
+
+ // Struct with one member being a specialization constant (or spec. const. composite) of a given type
+ for (int typeNdx = 0; typeNdx < DE_LENGTH_OF_ARRAY(allTypes); ++typeNdx)
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, makeStructCompositeCaseDefinition(allTypes[typeNdx])));
+
+ // Special case - struct with array
+ {
+ const int checksum = (1 + 2 + 31 + 4 + 0);
+ const CaseDefinition def =
+ {
+ "array",
+ makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const float sc0 = 1.0;", 4, makeValueFloat32(31.0f))),
+ 4,
+ " float result;\n",
+
+ "struct Data {\n"
+ " int i;\n"
+ " vec3 sc[3];\n"
+ " bool b;\n"
+ "};\n"
+ "\n"
+ "const Data s0 = Data(1, vec3[3](vec3(2.0), vec3(sc0), vec3(4.0)), false);\n",
+
+ " float sum_s0 = 0;\n"
+ "\n"
+ " sum_s0 += float(s0.i);\n"
+ " sum_s0 += float(s0.sc[0][0]);\n"
+ " sum_s0 += float(s0.sc[1][0]);\n"
+ " sum_s0 += float(s0.sc[2][0]);\n"
+ " sum_s0 += float(s0.b);\n"
+ "\n"
+ " sb_out.result = sum_s0;\n",
+
+ makeVector(OffsetValue(4, 0, makeValueFloat32(static_cast<float>(checksum)))),
+ (FeatureFlags)0,
+ };
+
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, def));
+ }
+
+ // Special case - struct of struct
+ {
+ const int checksum = (1 + 2 + 11 + 4 + 1);
+ const CaseDefinition def =
+ {
+ "struct",
+ makeVector(SpecConstant(1u, "layout(constant_id = ${ID}) const int sc0 = 1;", 4, makeValueInt32(11))),
+ 4,
+ " int result;\n",
+
+ "struct Nested {\n"
+ " vec2 v;\n"
+ " int sc;\n"
+ " float f;\n"
+ "};\n"
+ "\n"
+ "struct Data {\n"
+ " uint ui;\n"
+ " Nested s;\n"
+ " bool b;\n"
+ "};\n"
+ "\n"
+ "const Data s0 = Data(1u, Nested(vec2(2.0), sc0, 4.0), true);\n",
+
+ " int sum_s0 = 0;\n"
+ "\n"
+ " sum_s0 += int(s0.ui);\n"
+ " sum_s0 += int(s0.s.v[0]);\n"
+ " sum_s0 += int(s0.s.sc);\n"
+ " sum_s0 += int(s0.s.f);\n"
+ " sum_s0 += int(s0.b);\n"
+ "\n"
+ " sb_out.result = sum_s0;\n",
+
+ makeVector(OffsetValue(4, 0, makeValueInt32(checksum))),
+ (FeatureFlags)0,
+ };
+
+ group->addChild(new SpecConstantTest(testCtx, shaderStage, def));
+ }
+
+ compositeTests->addChild(group.release());
+ }
+
+ return compositeTests.release();
+}
+
+} // anonymous ns
+
+// Builds the root "spec_constant" test group. Graphics shader stages are
+// gathered under a "graphics" subgroup while compute sits directly under the
+// root. Every stage gets the same sub-suites; compute additionally receives
+// work group size specialization tests.
+tcu::TestCaseGroup* createSpecConstantTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> allTests	 (new tcu::TestCaseGroup(testCtx, "spec_constant", "Specialization constants tests"));
+	de::MovePtr<tcu::TestCaseGroup> graphicsGroup (new tcu::TestCaseGroup(testCtx, "graphics", ""));
+
+	// Associates a shader stage with its group name and parent group.
+	struct StageDef
+	{
+		tcu::TestCaseGroup* parentGroup;
+		const char*			name;
+		VkShaderStageFlagBits stage;
+	};
+
+	static const StageDef stages[] =
+	{
+		{ graphicsGroup.get(),	"vertex",		VK_SHADER_STAGE_VERTEX_BIT					},
+		{ graphicsGroup.get(),	"fragment",		VK_SHADER_STAGE_FRAGMENT_BIT				},
+		{ graphicsGroup.get(),	"tess_control",	VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT	},
+		{ graphicsGroup.get(),	"tess_eval",	VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT	},
+		{ graphicsGroup.get(),	"geometry",		VK_SHADER_STAGE_GEOMETRY_BIT				},
+		{ allTests.get(),		"compute",		VK_SHADER_STAGE_COMPUTE_BIT					},
+	};
+
+	// Ownership is transferred, but the raw pointers stored in stages[] remain
+	// valid because allTests keeps the graphics group alive.
+	allTests->addChild(graphicsGroup.release());
+
+	for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stages); ++stageNdx)
+	{
+		const StageDef&					stage		(stages[stageNdx]);
+		de::MovePtr<tcu::TestCaseGroup>	stageGroup	(new tcu::TestCaseGroup(testCtx, stage.name, ""));
+
+		stageGroup->addChild(createDefaultValueTests		(testCtx, stage.stage));
+		stageGroup->addChild(createBasicSpecializationTests	(testCtx, stage.stage));
+		stageGroup->addChild(createDataSizeTests			(testCtx, stage.stage));
+		stageGroup->addChild(createBuiltInOverrideTests		(testCtx, stage.stage));
+		stageGroup->addChild(createExpressionTests			(testCtx, stage.stage));
+		stageGroup->addChild(createCompositeTests			(testCtx, stage.stage));
+
+		// Work group size (local_size_x/y/z) only exists in compute shaders.
+		if (stage.stage == VK_SHADER_STAGE_COMPUTE_BIT)
+			stageGroup->addChild(createWorkGroupSizeTests(testCtx));
+
+		stage.parentGroup->addChild(stageGroup.release());
+	}
+
+	return allTests.release();
+}
+
+} // pipeline
+} // vkt
diff --git a/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.hpp b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.hpp
new file mode 100644
index 000000000..cd3725b83
--- /dev/null
+++ b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantTests.hpp
@@ -0,0 +1,39 @@
+#ifndef _VKTPIPELINESPECCONSTANTTESTS_HPP
+#define _VKTPIPELINESPECCONSTANTTESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Pipeline specialization constants tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+tcu::TestCaseGroup* createSpecConstantTests (tcu::TestContext& testCtx);
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINESPECCONSTANTTESTS_HPP
diff --git a/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.cpp b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.cpp
new file mode 100644
index 000000000..7549257a1
--- /dev/null
+++ b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.cpp
@@ -0,0 +1,639 @@
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Pipeline specialization constants test utilities
+ *//*--------------------------------------------------------------------*/
+
+#include "vktPipelineSpecConstantUtil.hpp"
+#include "vkTypeUtil.hpp"
+#include <vector>
+
+namespace vkt
+{
+namespace pipeline
+{
+
+using namespace vk;
+
+// Fills in a VkBufferCreateInfo for a non-sparse, exclusively-owned buffer
+// of the given size and usage (no queue family list needed for EXCLUSIVE).
+VkBufferCreateInfo makeBufferCreateInfo (const VkDeviceSize			bufferSize,
+										 const VkBufferUsageFlags	usage)
+{
+	const VkBufferCreateInfo bufferCreateInfo =
+	{
+		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,								// const void*			pNext;
+		(VkBufferCreateFlags)0,					// VkBufferCreateFlags	flags;
+		bufferSize,								// VkDeviceSize			size;
+		usage,									// VkBufferUsageFlags	usage;
+		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
+		0u,										// deUint32				queueFamilyIndexCount;
+		DE_NULL,								// const deUint32*		pQueueFamilyIndices;
+	};
+	return bufferCreateInfo;
+}
+
+// Builds a VkBufferMemoryBarrier over [offset, offset + bufferSizeBytes) with
+// no queue family ownership transfer (both families VK_QUEUE_FAMILY_IGNORED).
+VkBufferMemoryBarrier makeBufferMemoryBarrier (const VkAccessFlags	srcAccessMask,
+											   const VkAccessFlags	dstAccessMask,
+											   const VkBuffer		buffer,
+											   const VkDeviceSize	offset,
+											   const VkDeviceSize	bufferSizeBytes)
+{
+	const VkBufferMemoryBarrier barrier =
+	{
+		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
+		DE_NULL,									// const void*		pNext;
+		srcAccessMask,								// VkAccessFlags	srcAccessMask;
+		dstAccessMask,								// VkAccessFlags	dstAccessMask;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32			srcQueueFamilyIndex;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32			destQueueFamilyIndex;
+		buffer,										// VkBuffer			buffer;
+		offset,										// VkDeviceSize		offset;
+		bufferSizeBytes,							// VkDeviceSize		size;
+	};
+	return barrier;
+}
+
+// Builds a VkImageMemoryBarrier combining an access-mask barrier with a
+// layout transition; no queue family ownership transfer is performed.
+VkImageMemoryBarrier makeImageMemoryBarrier	(const VkAccessFlags			srcAccessMask,
+											 const VkAccessFlags			dstAccessMask,
+											 const VkImageLayout			oldLayout,
+											 const VkImageLayout			newLayout,
+											 const VkImage					image,
+											 const VkImageSubresourceRange	subresourceRange)
+{
+	const VkImageMemoryBarrier barrier =
+	{
+		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
+		DE_NULL,									// const void*				pNext;
+		srcAccessMask,								// VkAccessFlags			srcAccessMask;
+		dstAccessMask,								// VkAccessFlags			dstAccessMask;
+		oldLayout,									// VkImageLayout			oldLayout;
+		newLayout,									// VkImageLayout			newLayout;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
+		VK_QUEUE_FAMILY_IGNORED,					// deUint32					destQueueFamilyIndex;
+		image,										// VkImage					image;
+		subresourceRange,							// VkImageSubresourceRange	subresourceRange;
+	};
+	return barrier;
+}
+
+// Creates a command pool for the given queue family; RESET_COMMAND_BUFFER
+// allows individual command buffers to be reset/re-recorded.
+Move<VkCommandPool> makeCommandPool (const DeviceInterface& vk, const VkDevice device, const deUint32 queueFamilyIndex)
+{
+	const VkCommandPoolCreateInfo info =
+	{
+		VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,			// VkStructureType			sType;
+		DE_NULL,											// const void*				pNext;
+		VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,	// VkCommandPoolCreateFlags	flags;
+		queueFamilyIndex,									// deUint32					queueFamilyIndex;
+	};
+	return createCommandPool(vk, device, &info);
+}
+
+// Allocates a single primary command buffer from the given pool.
+Move<VkCommandBuffer> makeCommandBuffer (const DeviceInterface& vk, const VkDevice device, const VkCommandPool commandPool)
+{
+	const VkCommandBufferAllocateInfo info =
+	{
+		VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,		// VkStructureType		sType;
+		DE_NULL,											// const void*			pNext;
+		commandPool,										// VkCommandPool		commandPool;
+		VK_COMMAND_BUFFER_LEVEL_PRIMARY,					// VkCommandBufferLevel	level;
+		1u,													// deUint32				commandBufferCount;
+	};
+	return allocateCommandBuffer(vk, device, &info);
+}
+
+// Allocates a single descriptor set with the given layout from the pool.
+Move<VkDescriptorSet> makeDescriptorSet (const DeviceInterface&			vk,
+										 const VkDevice					device,
+										 const VkDescriptorPool			descriptorPool,
+										 const VkDescriptorSetLayout	setLayout)
+{
+	const VkDescriptorSetAllocateInfo info =
+	{
+		VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,		// VkStructureType				sType;
+		DE_NULL,											// const void*					pNext;
+		descriptorPool,										// VkDescriptorPool				descriptorPool;
+		1u,													// deUint32						descriptorSetCount;
+		&setLayout,											// const VkDescriptorSetLayout*	pSetLayouts;
+	};
+	return allocateDescriptorSet(vk, device, &info);
+}
+
+// Creates a pipeline layout with exactly one descriptor set layout and no
+// push constant ranges.
+Move<VkPipelineLayout> makePipelineLayout (const DeviceInterface&		vk,
+										   const VkDevice				device,
+										   const VkDescriptorSetLayout	descriptorSetLayout)
+{
+	const VkPipelineLayoutCreateInfo info =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,		// VkStructureType				sType;
+		DE_NULL,											// const void*					pNext;
+		(VkPipelineLayoutCreateFlags)0,						// VkPipelineLayoutCreateFlags	flags;
+		1u,													// deUint32						setLayoutCount;
+		&descriptorSetLayout,								// const VkDescriptorSetLayout*	pSetLayouts;
+		0u,													// deUint32						pushConstantRangeCount;
+		DE_NULL,											// const VkPushConstantRange*	pPushConstantRanges;
+	};
+	return createPipelineLayout(vk, device, &info);
+}
+
+// Creates a compute pipeline from a single shader module (entry point "main").
+// specInfo carries the specialization constant data and may be DE_NULL.
+Move<VkPipeline> makeComputePipeline (const DeviceInterface&		vk,
+									  const VkDevice				device,
+									  const VkPipelineLayout		pipelineLayout,
+									  const VkShaderModule			shaderModule,
+									  const VkSpecializationInfo*	specInfo)
+{
+	const VkPipelineShaderStageCreateInfo shaderStageInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
+		DE_NULL,												// const void*						pNext;
+		(VkPipelineShaderStageCreateFlags)0,					// VkPipelineShaderStageCreateFlags	flags;
+		VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits			stage;
+		shaderModule,											// VkShaderModule					module;
+		"main",													// const char*						pName;
+		specInfo,												// const VkSpecializationInfo*		pSpecializationInfo;
+	};
+	const VkComputePipelineCreateInfo pipelineInfo =
+	{
+		VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,			// VkStructureType					sType;
+		DE_NULL,												// const void*						pNext;
+		(VkPipelineCreateFlags)0,								// VkPipelineCreateFlags			flags;
+		shaderStageInfo,										// VkPipelineShaderStageCreateInfo	stage;
+		pipelineLayout,											// VkPipelineLayout					layout;
+		DE_NULL,												// VkPipeline						basePipelineHandle;
+		0,														// deInt32							basePipelineIndex;
+	};
+	// No pipeline cache is used (second argument is DE_NULL).
+	return createComputePipeline(vk, device, DE_NULL , &pipelineInfo);
+}
+
+// Fills in a VkImageCreateInfo for a single-sampled, single-mip, single-layer
+// 2D image with optimal tiling and exclusive sharing.
+VkImageCreateInfo makeImageCreateInfo (const tcu::IVec2& size, const VkFormat format, const VkImageUsageFlags usage)
+{
+	const VkImageCreateInfo imageInfo =
+	{
+		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
+		DE_NULL,								// const void*				pNext;
+		(VkImageCreateFlags)0,					// VkImageCreateFlags		flags;
+		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
+		format,									// VkFormat					format;
+		makeExtent3D(size.x(), size.y(), 1),	// VkExtent3D				extent;
+		1u,										// uint32_t					mipLevels;
+		1u,										// uint32_t					arrayLayers;
+		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
+		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
+		usage,									// VkImageUsageFlags		usage;
+		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
+		0u,										// uint32_t					queueFamilyIndexCount;
+		DE_NULL,								// const uint32_t*			pQueueFamilyIndices;
+		VK_IMAGE_LAYOUT_UNDEFINED,				// VkImageLayout			initialLayout;
+	};
+	return imageInfo;
+}
+
+// Creates an image view over the color aspect of mip level 0 / array layer 0
+// with an identity (RGBA) component mapping.
+Move<VkImageView> makeImageView (const DeviceInterface&	vk,
+								 const VkDevice			vkDevice,
+								 const VkImage			image,
+								 const VkImageViewType	viewType,
+								 const VkFormat			format)
+{
+	const VkImageViewCreateInfo imageViewParams =
+	{
+		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// VkStructureType			sType;
+		DE_NULL,										// const void*				pNext;
+		(VkImageViewCreateFlags)0,						// VkImageViewCreateFlags	flags;
+		image,											// VkImage					image;
+		viewType,										// VkImageViewType			viewType;
+		format,											// VkFormat					format;
+		makeComponentMappingRGBA(),						// VkComponentMapping		components;
+		makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u),	// VkImageSubresourceRange	subresourceRange;
+	};
+	return createImageView(vk, vkDevice, &imageViewParams);
+}
+
+// Begins recording a primary command buffer for one-time submission.
+void beginCommandBuffer (const DeviceInterface& vk, const VkCommandBuffer commandBuffer)
+{
+	const VkCommandBufferBeginInfo info =
+	{
+		VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// VkStructureType							sType;
+		DE_NULL,										// const void*								pNext;
+		VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,	// VkCommandBufferUsageFlags				flags;
+		DE_NULL,										// const VkCommandBufferInheritanceInfo*	pInheritanceInfo;
+	};
+	VK_CHECK(vk.beginCommandBuffer(commandBuffer, &info));
+}
+
+// Finishes recording the command buffer, checking the result.
+void endCommandBuffer (const DeviceInterface& vk, const VkCommandBuffer commandBuffer)
+{
+	VK_CHECK(vk.endCommandBuffer(commandBuffer));
+}
+
+// Submits a single command buffer to the queue (no semaphores) and blocks
+// until its fence signals (infinite timeout).
+void submitCommandsAndWait (const DeviceInterface&	vk,
+							const VkDevice			device,
+							const VkQueue			queue,
+							const VkCommandBuffer	commandBuffer)
+{
+	const VkFenceCreateInfo	fenceInfo =
+	{
+		VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,	// VkStructureType		sType;
+		DE_NULL,								// const void*			pNext;
+		(VkFenceCreateFlags)0,					// VkFenceCreateFlags	flags;
+	};
+	const Unique<VkFence>	fence(createFence(vk, device, &fenceInfo));
+
+	const VkSubmitInfo submitInfo =
+	{
+		VK_STRUCTURE_TYPE_SUBMIT_INFO,		// VkStructureType				sType;
+		DE_NULL,							// const void*					pNext;
+		0u,									// uint32_t						waitSemaphoreCount;
+		DE_NULL,							// const VkSemaphore*			pWaitSemaphores;
+		DE_NULL,							// const VkPipelineStageFlags*	pWaitDstStageMask;
+		1u,									// uint32_t						commandBufferCount;
+		&commandBuffer,						// const VkCommandBuffer*		pCommandBuffers;
+		0u,									// uint32_t						signalSemaphoreCount;
+		DE_NULL,							// const VkSemaphore*			pSignalSemaphores;
+	};
+	VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
+	// ~0ull == UINT64_MAX: wait forever for completion.
+	VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
+}
+
+// Begins the render pass with an inline subpass, clearing the single color
+// attachment to the given color.
+void beginRenderPass (const DeviceInterface&	vk,
+					  const VkCommandBuffer		commandBuffer,
+					  const VkRenderPass		renderPass,
+					  const VkFramebuffer		framebuffer,
+					  const VkRect2D&			renderArea,
+					  const tcu::Vec4&			clearColor)
+{
+	const VkClearValue clearValue = makeClearValueColor(clearColor);
+
+	const VkRenderPassBeginInfo renderPassBeginInfo = {
+		VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,		// VkStructureType		sType;
+		DE_NULL,										// const void*			pNext;
+		renderPass,										// VkRenderPass			renderPass;
+		framebuffer,									// VkFramebuffer		framebuffer;
+		renderArea,										// VkRect2D				renderArea;
+		1u,												// uint32_t				clearValueCount;
+		&clearValue,									// const VkClearValue*	pClearValues;
+	};
+
+	vk.cmdBeginRenderPass(commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
+}
+
+// Ends the current render pass instance on the command buffer.
+void endRenderPass (const DeviceInterface&	vk,
+					const VkCommandBuffer	commandBuffer)
+{
+	vk.cmdEndRenderPass(commandBuffer);
+}
+
+// Creates a single-subpass render pass with one cleared-and-stored color
+// attachment (stays in COLOR_ATTACHMENT_OPTIMAL) and no depth/stencil
+// attachment (the reference uses VK_ATTACHMENT_UNUSED).
+Move<VkRenderPass> makeRenderPass (const DeviceInterface&	vk,
+								   const VkDevice			device,
+								   const VkFormat			colorFormat)
+{
+	const VkAttachmentDescription colorAttachmentDescription =
+	{
+		(VkAttachmentDescriptionFlags)0,			// VkAttachmentDescriptionFlags		flags;
+		colorFormat,								// VkFormat							format;
+		VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits			samples;
+		VK_ATTACHMENT_LOAD_OP_CLEAR,				// VkAttachmentLoadOp				loadOp;
+		VK_ATTACHMENT_STORE_OP_STORE,				// VkAttachmentStoreOp				storeOp;
+		VK_ATTACHMENT_LOAD_OP_DONT_CARE,			// VkAttachmentLoadOp				stencilLoadOp;
+		VK_ATTACHMENT_STORE_OP_DONT_CARE,			// VkAttachmentStoreOp				stencilStoreOp;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout					initialLayout;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout					finalLayout;
+	};
+
+	const VkAttachmentReference colorAttachmentReference =
+	{
+		0u,											// deUint32			attachment;
+		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout	layout;
+	};
+
+	// Depth/stencil is deliberately unused in these tests.
+	const VkAttachmentReference depthAttachmentReference =
+	{
+		VK_ATTACHMENT_UNUSED,						// deUint32			attachment;
+		VK_IMAGE_LAYOUT_UNDEFINED					// VkImageLayout	layout;
+	};
+
+	const VkSubpassDescription subpassDescription =
+	{
+		(VkSubpassDescriptionFlags)0,				// VkSubpassDescriptionFlags		flags;
+		VK_PIPELINE_BIND_POINT_GRAPHICS,			// VkPipelineBindPoint				pipelineBindPoint;
+		0u,											// deUint32							inputAttachmentCount;
+		DE_NULL,									// const VkAttachmentReference*		pInputAttachments;
+		1u,											// deUint32							colorAttachmentCount;
+		&colorAttachmentReference,					// const VkAttachmentReference*		pColorAttachments;
+		DE_NULL,									// const VkAttachmentReference*		pResolveAttachments;
+		&depthAttachmentReference,					// const VkAttachmentReference*		pDepthStencilAttachment;
+		0u,											// deUint32							preserveAttachmentCount;
+		DE_NULL										// const deUint32*					pPreserveAttachments;
+	};
+
+	const VkRenderPassCreateInfo renderPassInfo =
+	{
+		VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,	// VkStructureType					sType;
+		DE_NULL,									// const void*						pNext;
+		(VkRenderPassCreateFlags)0,					// VkRenderPassCreateFlags			flags;
+		1u,											// deUint32							attachmentCount;
+		&colorAttachmentDescription,				// const VkAttachmentDescription*	pAttachments;
+		1u,											// deUint32							subpassCount;
+		&subpassDescription,						// const VkSubpassDescription*		pSubpasses;
+		0u,											// deUint32							dependencyCount;
+		DE_NULL										// const VkSubpassDependency*		pDependencies;
+	};
+
+	return createRenderPass(vk, device, &renderPassInfo);
+}
+
+// Creates a single-layer framebuffer with one color attachment view.
+Move<VkFramebuffer> makeFramebuffer (const DeviceInterface&	vk,
+									 const VkDevice			device,
+									 const VkRenderPass		renderPass,
+									 const VkImageView		colorAttachment,
+									 const deUint32			width,
+									 const deUint32			height)
+{
+	const VkFramebufferCreateInfo framebufferInfo = {
+		VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,		// VkStructureType			sType;
+		DE_NULL,										// const void*				pNext;
+		(VkFramebufferCreateFlags)0,					// VkFramebufferCreateFlags	flags;
+		renderPass,										// VkRenderPass				renderPass;
+		1u,												// uint32_t					attachmentCount;
+		&colorAttachment,								// const VkImageView*		pAttachments;
+		width,											// uint32_t					width;
+		height,											// uint32_t					height;
+		1u,												// uint32_t					layers;
+	};
+
+	return createFramebuffer(vk, device, &framebufferInfo);
+}
+
+// Registers one shader stage for the pipeline being built: creates a shader
+// module from 'binary' (stored in the builder, which keeps it alive until
+// pipeline creation), records the stage create info, and accumulates the
+// stage bit in m_shaderStageFlags. Setting the same stage twice is a
+// programming error (caught by DE_ASSERT in debug builds). Returns *this
+// to allow call chaining.
+GraphicsPipelineBuilder& GraphicsPipelineBuilder::setShader (const DeviceInterface&			vk,
+															 const VkDevice					device,
+															 const VkShaderStageFlagBits	stage,
+															 const ProgramBinary&			binary,
+															 const VkSpecializationInfo*	specInfo)
+{
+	VkShaderModule module;
+	switch (stage)
+	{
+		case (VK_SHADER_STAGE_VERTEX_BIT):
+			DE_ASSERT(m_vertexShaderModule.get() == DE_NULL);
+			m_vertexShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
+			module = *m_vertexShaderModule;
+			break;
+
+		case (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT):
+			DE_ASSERT(m_tessControlShaderModule.get() == DE_NULL);
+			m_tessControlShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
+			module = *m_tessControlShaderModule;
+			break;
+
+		case (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT):
+			DE_ASSERT(m_tessEvaluationShaderModule.get() == DE_NULL);
+			m_tessEvaluationShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
+			module = *m_tessEvaluationShaderModule;
+			break;
+
+		case (VK_SHADER_STAGE_GEOMETRY_BIT):
+			DE_ASSERT(m_geometryShaderModule.get() == DE_NULL);
+			m_geometryShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
+			module = *m_geometryShaderModule;
+			break;
+
+		case (VK_SHADER_STAGE_FRAGMENT_BIT):
+			DE_ASSERT(m_fragmentShaderModule.get() == DE_NULL);
+			m_fragmentShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0);
+			module = *m_fragmentShaderModule;
+			break;
+
+		// Unknown stage: return without touching builder state ('module' is
+		// never read on this path).
+		default:
+			DE_FATAL("Invalid shader stage");
+			return *this;
+	}
+
+	const VkPipelineShaderStageCreateInfo pipelineShaderStageInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
+		DE_NULL,												// const void*						pNext;
+		(VkPipelineShaderStageCreateFlags)0,					// VkPipelineShaderStageCreateFlags	flags;
+		stage,													// VkShaderStageFlagBits			stage;
+		module,													// VkShaderModule					module;
+		"main",													// const char*						pName;
+		specInfo,												// const VkSpecializationInfo*		pSpecializationInfo;
+	};
+
+	m_shaderStageFlags |= stage;
+	m_shaderStages.push_back(pipelineShaderStageInfo);
+
+	return *this;
+}
+
+// Assembles and creates the graphics pipeline from the previously registered
+// shader stages. Fixed-function state is hard-coded for these tests: a single
+// vec4 position attribute, one full-render-size viewport/scissor, fill-mode
+// rasterization with no culling, single-sample, depth/stencil tests disabled,
+// and one color attachment with blending off. Triangle patches are used when
+// tessellation stages are present, plain triangle lists otherwise.
+Move<VkPipeline> GraphicsPipelineBuilder::build (const DeviceInterface&	vk,
+												 const VkDevice			device,
+												 const VkPipelineLayout	pipelineLayout,
+												 const VkRenderPass		renderPass)
+{
+	// Vertex input: one tightly-packed vec4 (XYZW position) per vertex.
+	const VkVertexInputBindingDescription vertexInputBindingDescription =
+	{
+		0u,								// uint32_t				binding;
+		sizeof(tcu::Vec4),				// uint32_t				stride;		// Vertex is a 4-element vector XYZW, position only
+		VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate;
+	};
+
+	const VkVertexInputAttributeDescription vertexInputAttributeDescription =
+	{
+		0u,								// uint32_t	location;
+		0u,								// uint32_t	binding;
+		VK_FORMAT_R32G32B32A32_SFLOAT,	// VkFormat	format;
+		0u,								// uint32_t	offset;
+	};
+
+	const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,													// const void*								pNext;
+		(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
+		1u,															// uint32_t									vertexBindingDescriptionCount;
+		&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
+		1u,															// uint32_t									vertexAttributeDescriptionCount;
+		&vertexInputAttributeDescription,							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
+	};
+
+	// With tessellation, vertices must be fed as patches instead of triangles.
+	const VkPrimitiveTopology topology = (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+	const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,														// const void*								pNext;
+		(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
+		topology,														// VkPrimitiveTopology						topology;
+		VK_FALSE,														// VkBool32									primitiveRestartEnable;
+	};
+
+	// Only referenced by the pipeline when tessellation stages are present.
+	const VkPipelineTessellationStateCreateInfo pipelineTessellationStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,													// const void*								pNext;
+		(VkPipelineTessellationStateCreateFlags)0,					// VkPipelineTessellationStateCreateFlags	flags;
+		3u,															// uint32_t									patchControlPoints;
+	};
+
+	const VkViewport viewport = makeViewport(
+		0.0f, 0.0f,
+		static_cast<float>(m_renderSize.x()), static_cast<float>(m_renderSize.y()),
+		0.0f, 1.0f);
+
+	const VkRect2D scissor = {
+		makeOffset2D(0, 0),
+		makeExtent2D(m_renderSize.x(), m_renderSize.y()),
+	};
+
+	const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType						sType;
+		DE_NULL,												// const void*							pNext;
+		(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags	flags;
+		1u,														// uint32_t								viewportCount;
+		&viewport,												// const VkViewport*					pViewports;
+		1u,														// uint32_t								scissorCount;
+		&scissor,												// const VkRect2D*						pScissors;
+	};
+
+	const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,													// const void*								pNext;
+		(VkPipelineRasterizationStateCreateFlags)0,					// VkPipelineRasterizationStateCreateFlags	flags;
+		VK_FALSE,													// VkBool32									depthClampEnable;
+		VK_FALSE,													// VkBool32									rasterizerDiscardEnable;
+		VK_POLYGON_MODE_FILL,										// VkPolygonMode							polygonMode;
+		VK_CULL_MODE_NONE,											// VkCullModeFlags							cullMode;
+		VK_FRONT_FACE_COUNTER_CLOCKWISE,							// VkFrontFace								frontFace;
+		VK_FALSE,													// VkBool32									depthBiasEnable;
+		0.0f,														// float									depthBiasConstantFactor;
+		0.0f,														// float									depthBiasClamp;
+		0.0f,														// float									depthBiasSlopeFactor;
+		1.0f,														// float									lineWidth;
+	};
+
+	const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,													// const void*								pNext;
+		(VkPipelineMultisampleStateCreateFlags)0,					// VkPipelineMultisampleStateCreateFlags	flags;
+		VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples;
+		VK_FALSE,													// VkBool32									sampleShadingEnable;
+		0.0f,														// float									minSampleShading;
+		DE_NULL,													// const VkSampleMask*						pSampleMask;
+		VK_FALSE,													// VkBool32									alphaToCoverageEnable;
+		VK_FALSE													// VkBool32									alphaToOneEnable;
+	};
+
+	// Stencil state is disabled below; this only provides valid filler values.
+	const VkStencilOpState stencilOpStateBasic = makeStencilOpState(
+		VK_STENCIL_OP_KEEP,		// stencil fail
+		VK_STENCIL_OP_KEEP,		// depth & stencil pass
+		VK_STENCIL_OP_KEEP,		// depth only fail
+		VK_COMPARE_OP_NEVER,	// compare op
+		0u,						// compare mask
+		0u,						// write mask
+		0u);					// reference
+
+	VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,	// VkStructureType							sType;
+		DE_NULL,													// const void*								pNext;
+		(VkPipelineDepthStencilStateCreateFlags)0,					// VkPipelineDepthStencilStateCreateFlags	flags;
+		VK_FALSE,													// VkBool32									depthTestEnable;
+		VK_FALSE,													// VkBool32									depthWriteEnable;
+		VK_COMPARE_OP_LESS,											// VkCompareOp								depthCompareOp;
+		VK_FALSE,													// VkBool32									depthBoundsTestEnable;
+		VK_FALSE,													// VkBool32									stencilTestEnable;
+		stencilOpStateBasic,										// VkStencilOpState							front;
+		stencilOpStateBasic,										// VkStencilOpState							back;
+		0.0f,														// float									minDepthBounds;
+		1.0f,														// float									maxDepthBounds;
+	};
+
+	const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+	const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
+	{
+		VK_FALSE,				// VkBool32					blendEnable;
+		VK_BLEND_FACTOR_ONE,	// VkBlendFactor			srcColorBlendFactor;
+		VK_BLEND_FACTOR_ZERO,	// VkBlendFactor			dstColorBlendFactor;
+		VK_BLEND_OP_ADD,		// VkBlendOp				colorBlendOp;
+		VK_BLEND_FACTOR_ONE,	// VkBlendFactor			srcAlphaBlendFactor;
+		VK_BLEND_FACTOR_ZERO,	// VkBlendFactor			dstAlphaBlendFactor;
+		VK_BLEND_OP_ADD,		// VkBlendOp				alphaBlendOp;
+		colorComponentsAll,		// VkColorComponentFlags	colorWriteMask;
+	};
+
+	const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
+	{
+		VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,	// VkStructureType								sType;
+		DE_NULL,													// const void*									pNext;
+		(VkPipelineColorBlendStateCreateFlags)0,					// VkPipelineColorBlendStateCreateFlags			flags;
+		VK_FALSE,													// VkBool32										logicOpEnable;
+		VK_LOGIC_OP_COPY,											// VkLogicOp									logicOp;
+		1u,															// deUint32										attachmentCount;
+		&pipelineColorBlendAttachmentState,							// const VkPipelineColorBlendAttachmentState*	pAttachments;
+		{ 0.0f, 0.0f, 0.0f, 0.0f },									// float										blendConstants[4];
+	};
+
+	const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
+	{
+		VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
+		DE_NULL,											// const void*										pNext;
+		(VkPipelineCreateFlags)0,							// VkPipelineCreateFlags							flags;
+		static_cast<deUint32>(m_shaderStages.size()),		// deUint32											stageCount;
+		&m_shaderStages[0],									// const VkPipelineShaderStageCreateInfo*			pStages;
+		&vertexInputStateInfo,								// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
+		&pipelineInputAssemblyStateInfo,					// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
+		(m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? &pipelineTessellationStateInfo : DE_NULL),	// const VkPipelineTessellationStateCreateInfo*	pTessellationState;
+		&pipelineViewportStateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
+		&pipelineRasterizationStateInfo,					// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
+		&pipelineMultisampleStateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
+		&pipelineDepthStencilStateInfo,						// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
+		&pipelineColorBlendStateInfo,						// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
+		DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
+		pipelineLayout,										// VkPipelineLayout									layout;
+		renderPass,											// VkRenderPass										renderPass;
+		0u,													// deUint32											subpass;
+		DE_NULL,											// VkPipeline										basePipelineHandle;
+		0,													// deInt32											basePipelineIndex;
+	};
+
+	// No pipeline cache (second argument DE_NULL).
+	return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo);
+}
+
+// Queries the physical device features and throws tcu::NotSupportedError for
+// the first requested feature (bit in 'flags') that the device lacks, which
+// marks the test as "not supported" rather than failed.
+void requireFeatures (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const FeatureFlags flags)
+{
+	const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(vki, physDevice);
+
+	if (((flags & FEATURE_TESSELLATION_SHADER) != 0) && !features.tessellationShader)
+		throw tcu::NotSupportedError("Tessellation shader not supported");
+
+	if (((flags & FEATURE_GEOMETRY_SHADER) != 0) && !features.geometryShader)
+		throw tcu::NotSupportedError("Geometry shader not supported");
+
+	if (((flags & FEATURE_SHADER_FLOAT_64) != 0) && !features.shaderFloat64)
+		throw tcu::NotSupportedError("Double-precision floats not supported");
+
+	if (((flags & FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS) != 0) && !features.vertexPipelineStoresAndAtomics)
+		throw tcu::NotSupportedError("SSBO and image writes not supported in vertex pipeline");
+
+	if (((flags & FEATURE_FRAGMENT_STORES_AND_ATOMICS) != 0) && !features.fragmentStoresAndAtomics)
+		throw tcu::NotSupportedError("SSBO and image writes not supported in fragment shader");
+}
+
+} // pipeline
+} // vkt
diff --git a/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.hpp b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.hpp
new file mode 100644
index 000000000..ededb936e
--- /dev/null
+++ b/external/vulkancts/modules/vulkan/pipeline/vktPipelineSpecConstantUtil.hpp
@@ -0,0 +1,194 @@
+#ifndef _VKTPIPELINESPECCONSTANTUTIL_HPP
+#define _VKTPIPELINESPECCONSTANTUTIL_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Pipeline specialization constants test utilities
+ *//*--------------------------------------------------------------------*/
+
+#include "vkDefs.hpp"
+#include "vkRef.hpp"
+#include "vkPrograms.hpp"
+#include "vkMemUtil.hpp"
+#include "vkRefUtil.hpp"
+#include "vkQueryUtil.hpp"
+
+namespace vkt
+{
+namespace pipeline
+{
+
+//! Owns a VkBuffer together with its backing device-memory allocation.
+//! The memory is allocated and bound in the constructor and released when the
+//! object is destroyed (vk::Unique / de::UniquePtr members). Non-copyable.
+class Buffer
+{
+public:
+ Buffer (const vk::DeviceInterface& vk,
+ const vk::VkDevice device,
+ vk::Allocator& allocator,
+ const vk::VkBufferCreateInfo& bufferCreateInfo,
+ const vk::MemoryRequirement memoryRequirement)
+
+ : m_buffer (createBuffer(vk, device, &bufferCreateInfo))
+ , m_allocation (allocator.allocate(getBufferMemoryRequirements(vk, device, *m_buffer), memoryRequirement))
+ {
+ // Bind the freshly allocated memory to the buffer before any use.
+ VK_CHECK(vk.bindBufferMemory(device, *m_buffer, m_allocation->getMemory(), m_allocation->getOffset()));
+ }
+
+ const vk::VkBuffer& get (void) const { return *m_buffer; }
+ const vk::VkBuffer& operator* (void) const { return get(); }
+ vk::Allocation& getAllocation (void) const { return *m_allocation; }
+
+private:
+ const vk::Unique<vk::VkBuffer> m_buffer;
+ const de::UniquePtr<vk::Allocation> m_allocation;
+
+ // "deleted" (pre-C++11 style: declared private, never defined)
+ Buffer (const Buffer&);
+ Buffer& operator= (const Buffer&);
+};
+
+//! Owns a VkImage together with its backing device-memory allocation.
+//! Mirrors the Buffer wrapper above: allocate and bind in the constructor,
+//! release on destruction. Non-copyable.
+class Image
+{
+public:
+ Image (const vk::DeviceInterface& vk,
+ const vk::VkDevice device,
+ vk::Allocator& allocator,
+ const vk::VkImageCreateInfo& imageCreateInfo,
+ const vk::MemoryRequirement memoryRequirement)
+
+ : m_image (createImage(vk, device, &imageCreateInfo))
+ , m_allocation (allocator.allocate(getImageMemoryRequirements(vk, device, *m_image), memoryRequirement))
+ {
+ // Bind the freshly allocated memory to the image before any use.
+ VK_CHECK(vk.bindImageMemory(device, *m_image, m_allocation->getMemory(), m_allocation->getOffset()));
+ }
+
+ const vk::VkImage& get (void) const { return *m_image; }
+ const vk::VkImage& operator* (void) const { return get(); }
+ vk::Allocation& getAllocation (void) const { return *m_allocation; }
+
+private:
+ const vk::Unique<vk::VkImage> m_image;
+ const de::UniquePtr<vk::Allocation> m_allocation;
+
+ // "deleted" (pre-C++11 style: declared private, never defined)
+ Image (const Image&);
+ Image& operator= (const Image&);
+};
+
+//! Incrementally assembles a graphics pipeline for the spec-constant tests:
+//! register shader stages (with optional specialization info) via setShader(),
+//! optionally override the render size, then create the pipeline with build().
+class GraphicsPipelineBuilder
+{
+public:
+ GraphicsPipelineBuilder (void) : m_renderSize (16, 16)
+ , m_shaderStageFlags (0u) {}
+
+ GraphicsPipelineBuilder& setRenderSize (const tcu::IVec2& size) { m_renderSize = size; return *this; }
+ GraphicsPipelineBuilder& setShader (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkShaderStageFlagBits stage, const vk::ProgramBinary& binary, const vk::VkSpecializationInfo* specInfo);
+ vk::Move<vk::VkPipeline> build (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkPipelineLayout pipelineLayout, const vk::VkRenderPass renderPass);
+
+private:
+ tcu::IVec2 m_renderSize;
+ // One module slot per supported stage; m_shaderStages holds the create
+ // infos accumulated by setShader(), m_shaderStageFlags the stages seen.
+ vk::Move<vk::VkShaderModule> m_vertexShaderModule;
+ vk::Move<vk::VkShaderModule> m_fragmentShaderModule;
+ vk::Move<vk::VkShaderModule> m_geometryShaderModule;
+ vk::Move<vk::VkShaderModule> m_tessControlShaderModule;
+ vk::Move<vk::VkShaderModule> m_tessEvaluationShaderModule;
+ std::vector<vk::VkPipelineShaderStageCreateInfo> m_shaderStages;
+ vk::VkShaderStageFlags m_shaderStageFlags;
+
+ GraphicsPipelineBuilder (const GraphicsPipelineBuilder&); // "deleted"
+ GraphicsPipelineBuilder& operator= (const GraphicsPipelineBuilder&);
+};
+
+//! Bitmask flags consumed by requireFeatures() to express which physical
+//! device features a test case depends on.
+enum FeatureFlagBits
+{
+ FEATURE_TESSELLATION_SHADER = 1u << 0,
+ FEATURE_GEOMETRY_SHADER = 1u << 1,
+ FEATURE_SHADER_FLOAT_64 = 1u << 2,
+ FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS = 1u << 3,
+ FEATURE_FRAGMENT_STORES_AND_ATOMICS = 1u << 4,
+};
+typedef deUint32 FeatureFlags; // combination of FeatureFlagBits
+
+// Free-standing helpers wrapping common Vulkan object creation and command
+// buffer recording, shared by the spec-constant test implementations.
+vk::VkBufferCreateInfo makeBufferCreateInfo (const vk::VkDeviceSize bufferSize, const vk::VkBufferUsageFlags usage);
+vk::VkImageCreateInfo makeImageCreateInfo (const tcu::IVec2& size, const vk::VkFormat format, const vk::VkImageUsageFlags usage);
+vk::Move<vk::VkCommandPool> makeCommandPool (const vk::DeviceInterface& vk, const vk::VkDevice device, const deUint32 queueFamilyIndex);
+vk::Move<vk::VkCommandBuffer> makeCommandBuffer (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkCommandPool commandPool);
+vk::Move<vk::VkDescriptorSet> makeDescriptorSet (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkDescriptorPool descriptorPool, const vk::VkDescriptorSetLayout setLayout);
+vk::Move<vk::VkPipelineLayout> makePipelineLayout (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkDescriptorSetLayout descriptorSetLayout);
+vk::Move<vk::VkPipeline> makeComputePipeline (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkPipelineLayout pipelineLayout, const vk::VkShaderModule shaderModule, const vk::VkSpecializationInfo* specInfo);
+vk::Move<vk::VkRenderPass> makeRenderPass (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkFormat colorFormat);
+vk::Move<vk::VkFramebuffer> makeFramebuffer (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkRenderPass renderPass, const vk::VkImageView colorAttachment, const deUint32 width, const deUint32 height);
+vk::Move<vk::VkImageView> makeImageView (const vk::DeviceInterface& vk, const vk::VkDevice vkDevice, const vk::VkImage image, const vk::VkImageViewType viewType, const vk::VkFormat format);
+vk::VkBufferMemoryBarrier makeBufferMemoryBarrier (const vk::VkAccessFlags srcAccessMask, const vk::VkAccessFlags dstAccessMask, const vk::VkBuffer buffer, const vk::VkDeviceSize offset, const vk::VkDeviceSize bufferSizeBytes);
+vk::VkImageMemoryBarrier makeImageMemoryBarrier (const vk::VkAccessFlags srcAccessMask, const vk::VkAccessFlags dstAccessMask, const vk::VkImageLayout oldLayout, const vk::VkImageLayout newLayout, const vk::VkImage image, const vk::VkImageSubresourceRange subresourceRange);
+void beginCommandBuffer (const vk::DeviceInterface& vk, const vk::VkCommandBuffer commandBuffer);
+void endCommandBuffer (const vk::DeviceInterface& vk, const vk::VkCommandBuffer commandBuffer);
+void submitCommandsAndWait (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkQueue queue, const vk::VkCommandBuffer commandBuffer);
+void beginRenderPass (const vk::DeviceInterface& vk, const vk::VkCommandBuffer commandBuffer, const vk::VkRenderPass renderPass, const vk::VkFramebuffer framebuffer, const vk::VkRect2D& renderArea, const tcu::Vec4& clearColor);
+void endRenderPass (const vk::DeviceInterface& vk, const vk::VkCommandBuffer commandBuffer);
+void requireFeatures (const vk::InstanceInterface& vki, const vk::VkPhysicalDevice physDevice, const FeatureFlags flags);
+
+// Ugly, brute-force replacement for the initializer list: overloads that
+// build a std::vector<T> from 1 to 4 elements, in argument order.
+
+template<typename T>
+std::vector<T> makeVector (const T& o1)
+{
+ std::vector<T> vec;
+ vec.reserve(1);
+ vec.push_back(o1);
+ return vec;
+}
+
+template<typename T>
+std::vector<T> makeVector (const T& o1, const T& o2)
+{
+ std::vector<T> vec;
+ vec.reserve(2);
+ vec.push_back(o1);
+ vec.push_back(o2);
+ return vec;
+}
+
+template<typename T>
+std::vector<T> makeVector (const T& o1, const T& o2, const T& o3)
+{
+ std::vector<T> vec;
+ vec.reserve(3);
+ vec.push_back(o1);
+ vec.push_back(o2);
+ vec.push_back(o3);
+ return vec;
+}
+
+template<typename T>
+std::vector<T> makeVector (const T& o1, const T& o2, const T& o3, const T& o4)
+{
+ std::vector<T> vec;
+ vec.reserve(4);
+ vec.push_back(o1);
+ vec.push_back(o2);
+ vec.push_back(o3);
+ vec.push_back(o4);
+ return vec;
+}
+
+} // pipeline
+} // vkt
+
+#endif // _VKTPIPELINESPECCONSTANTUTIL_HPP
diff --git a/external/vulkancts/modules/vulkan/pipeline/vktPipelineTests.cpp b/external/vulkancts/modules/vulkan/pipeline/vktPipelineTests.cpp
index 04b4686de..04997bd7e 100644
--- a/external/vulkancts/modules/vulkan/pipeline/vktPipelineTests.cpp
+++ b/external/vulkancts/modules/vulkan/pipeline/vktPipelineTests.cpp
@@ -31,6 +31,7 @@
#include "vktPipelineSamplerTests.hpp"
#include "vktPipelineImageViewTests.hpp"
#include "vktPipelinePushConstantTests.hpp"
+#include "vktPipelineSpecConstantTests.hpp"
#include "vktPipelineMultisampleTests.hpp"
#include "vktPipelineVertexInputTests.hpp"
#include "vktPipelineTimestampTests.hpp"
@@ -57,6 +58,7 @@ void createChildren (tcu::TestCaseGroup* pipelineTests)
pipelineTests->addChild(createSamplerTests (testCtx));
pipelineTests->addChild(createImageViewTests (testCtx));
pipelineTests->addChild(createPushConstantTests (testCtx));
+ pipelineTests->addChild(createSpecConstantTests (testCtx));
pipelineTests->addChild(createMultisampleTests (testCtx));
pipelineTests->addChild(createVertexInputTests (testCtx));
pipelineTests->addChild(createInputAssemblyTests(testCtx));
diff --git a/external/vulkancts/mustpass/1.0.0/vk-default.txt b/external/vulkancts/mustpass/1.0.0/vk-default.txt
index 19fb5a0f4..17d54bbad 100644
--- a/external/vulkancts/mustpass/1.0.0/vk-default.txt
+++ b/external/vulkancts/mustpass/1.0.0/vk-default.txt
@@ -54281,6 +54281,1039 @@ dEQP-VK.pipeline.push_constant.graphics_pipeline.data_update_partial_1
dEQP-VK.pipeline.push_constant.graphics_pipeline.data_update_partial_2
dEQP-VK.pipeline.push_constant.graphics_pipeline.data_update_multiple
dEQP-VK.pipeline.push_constant.compute_pipeline.simple_test
+dEQP-VK.pipeline.spec_constant.graphics.vertex.default_value.bool
+dEQP-VK.pipeline.spec_constant.graphics.vertex.default_value.int
+dEQP-VK.pipeline.spec_constant.graphics.vertex.default_value.uint
+dEQP-VK.pipeline.spec_constant.graphics.vertex.default_value.float
+dEQP-VK.pipeline.spec_constant.graphics.vertex.default_value.double
+dEQP-VK.pipeline.spec_constant.graphics.vertex.basic.bool
+dEQP-VK.pipeline.spec_constant.graphics.vertex.basic.int
+dEQP-VK.pipeline.spec_constant.graphics.vertex.basic.uint
+dEQP-VK.pipeline.spec_constant.graphics.vertex.basic.float
+dEQP-VK.pipeline.spec_constant.graphics.vertex.basic.double
+dEQP-VK.pipeline.spec_constant.graphics.vertex.data_size.bool
+dEQP-VK.pipeline.spec_constant.graphics.vertex.data_size.int
+dEQP-VK.pipeline.spec_constant.graphics.vertex.data_size.uint
+dEQP-VK.pipeline.spec_constant.graphics.vertex.builtin.default
+dEQP-VK.pipeline.spec_constant.graphics.vertex.builtin.specialized
+dEQP-VK.pipeline.spec_constant.graphics.vertex.expression.spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.vertex.expression.array_size
+dEQP-VK.pipeline.spec_constant.graphics.vertex.expression.array_size_expression
+dEQP-VK.pipeline.spec_constant.graphics.vertex.expression.array_size_spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.vertex.expression.array_size_length
+dEQP-VK.pipeline.spec_constant.graphics.vertex.expression.array_size_pass_to_function
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.vec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.vec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.vec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.vector.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.mat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.matrix.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.float
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.vec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.vec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.vec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.mat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.double
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.int
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.uint
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.bool
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_float
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_vec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_vec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_vec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_mat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_double
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_dmat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_int
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_ivec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_ivec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_ivec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_uint
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_uvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_uvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_uvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_bool
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_bvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_bvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.array_bvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.array.struct
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.float
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.vec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.vec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.vec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.mat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.double
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.int
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.uint
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.bool
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.array
+dEQP-VK.pipeline.spec_constant.graphics.vertex.composite.struct.struct
+dEQP-VK.pipeline.spec_constant.graphics.fragment.default_value.bool
+dEQP-VK.pipeline.spec_constant.graphics.fragment.default_value.int
+dEQP-VK.pipeline.spec_constant.graphics.fragment.default_value.uint
+dEQP-VK.pipeline.spec_constant.graphics.fragment.default_value.float
+dEQP-VK.pipeline.spec_constant.graphics.fragment.default_value.double
+dEQP-VK.pipeline.spec_constant.graphics.fragment.basic.bool
+dEQP-VK.pipeline.spec_constant.graphics.fragment.basic.int
+dEQP-VK.pipeline.spec_constant.graphics.fragment.basic.uint
+dEQP-VK.pipeline.spec_constant.graphics.fragment.basic.float
+dEQP-VK.pipeline.spec_constant.graphics.fragment.basic.double
+dEQP-VK.pipeline.spec_constant.graphics.fragment.data_size.bool
+dEQP-VK.pipeline.spec_constant.graphics.fragment.data_size.int
+dEQP-VK.pipeline.spec_constant.graphics.fragment.data_size.uint
+dEQP-VK.pipeline.spec_constant.graphics.fragment.builtin.default
+dEQP-VK.pipeline.spec_constant.graphics.fragment.builtin.specialized
+dEQP-VK.pipeline.spec_constant.graphics.fragment.expression.spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.fragment.expression.array_size
+dEQP-VK.pipeline.spec_constant.graphics.fragment.expression.array_size_expression
+dEQP-VK.pipeline.spec_constant.graphics.fragment.expression.array_size_spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.fragment.expression.array_size_length
+dEQP-VK.pipeline.spec_constant.graphics.fragment.expression.array_size_pass_to_function
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.vec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.vec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.vec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.vector.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.mat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.matrix.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.float
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.vec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.vec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.vec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.mat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.double
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.int
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.uint
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.bool
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_float
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_vec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_vec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_vec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_mat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_double
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_dmat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_int
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_ivec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_ivec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_ivec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_uint
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_uvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_uvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_uvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_bool
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_bvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_bvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.array_bvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.array.struct
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.float
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.vec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.vec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.vec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.mat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.double
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.int
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.uint
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.bool
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.array
+dEQP-VK.pipeline.spec_constant.graphics.fragment.composite.struct.struct
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.default_value.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.default_value.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.default_value.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.default_value.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.default_value.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.basic.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.basic.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.basic.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.basic.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.basic.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.data_size.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.data_size.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.data_size.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.builtin.default
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.builtin.specialized
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.expression.spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.expression.array_size
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.expression.array_size_expression
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.expression.array_size_spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.expression.array_size_length
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.expression.array_size_pass_to_function
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.vector.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.matrix.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_float
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_double
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_int
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.array_bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.array.struct
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.array
+dEQP-VK.pipeline.spec_constant.graphics.tess_control.composite.struct.struct
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.default_value.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.default_value.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.default_value.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.default_value.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.default_value.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.basic.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.basic.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.basic.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.basic.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.basic.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.data_size.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.data_size.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.data_size.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.builtin.default
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.builtin.specialized
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.expression.spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.expression.array_size
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.expression.array_size_expression
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.expression.array_size_spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.expression.array_size_length
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.expression.array_size_pass_to_function
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.vector.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.matrix.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_float
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_double
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_int
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.array_bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.array.struct
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.float
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.vec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.vec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.vec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.mat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.double
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.int
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.uint
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.bool
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.array
+dEQP-VK.pipeline.spec_constant.graphics.tess_eval.composite.struct.struct
+dEQP-VK.pipeline.spec_constant.graphics.geometry.default_value.bool
+dEQP-VK.pipeline.spec_constant.graphics.geometry.default_value.int
+dEQP-VK.pipeline.spec_constant.graphics.geometry.default_value.uint
+dEQP-VK.pipeline.spec_constant.graphics.geometry.default_value.float
+dEQP-VK.pipeline.spec_constant.graphics.geometry.default_value.double
+dEQP-VK.pipeline.spec_constant.graphics.geometry.basic.bool
+dEQP-VK.pipeline.spec_constant.graphics.geometry.basic.int
+dEQP-VK.pipeline.spec_constant.graphics.geometry.basic.uint
+dEQP-VK.pipeline.spec_constant.graphics.geometry.basic.float
+dEQP-VK.pipeline.spec_constant.graphics.geometry.basic.double
+dEQP-VK.pipeline.spec_constant.graphics.geometry.data_size.bool
+dEQP-VK.pipeline.spec_constant.graphics.geometry.data_size.int
+dEQP-VK.pipeline.spec_constant.graphics.geometry.data_size.uint
+dEQP-VK.pipeline.spec_constant.graphics.geometry.builtin.default
+dEQP-VK.pipeline.spec_constant.graphics.geometry.builtin.specialized
+dEQP-VK.pipeline.spec_constant.graphics.geometry.expression.spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.geometry.expression.array_size
+dEQP-VK.pipeline.spec_constant.graphics.geometry.expression.array_size_expression
+dEQP-VK.pipeline.spec_constant.graphics.geometry.expression.array_size_spec_const_expression
+dEQP-VK.pipeline.spec_constant.graphics.geometry.expression.array_size_length
+dEQP-VK.pipeline.spec_constant.graphics.geometry.expression.array_size_pass_to_function
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.vec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.vec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.vec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.vector.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.mat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.matrix.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.float
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.vec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.vec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.vec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.mat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.double
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.int
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.uint
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.bool
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_float
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_vec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_vec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_vec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_mat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_double
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_dmat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_int
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_ivec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_ivec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_ivec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_uint
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_uvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_uvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_uvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_bool
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_bvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_bvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.array_bvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.array.struct
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.float
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.vec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.vec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.vec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.mat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.double
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat2x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat2x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat3x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat3x4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat4x2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat4x3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.dmat4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.int
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.ivec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.ivec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.ivec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.uint
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.uvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.uvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.uvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.bool
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.bvec2
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.bvec3
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.bvec4
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.array
+dEQP-VK.pipeline.spec_constant.graphics.geometry.composite.struct.struct
+dEQP-VK.pipeline.spec_constant.compute.default_value.bool
+dEQP-VK.pipeline.spec_constant.compute.default_value.int
+dEQP-VK.pipeline.spec_constant.compute.default_value.uint
+dEQP-VK.pipeline.spec_constant.compute.default_value.float
+dEQP-VK.pipeline.spec_constant.compute.default_value.double
+dEQP-VK.pipeline.spec_constant.compute.basic.bool
+dEQP-VK.pipeline.spec_constant.compute.basic.int
+dEQP-VK.pipeline.spec_constant.compute.basic.uint
+dEQP-VK.pipeline.spec_constant.compute.basic.float
+dEQP-VK.pipeline.spec_constant.compute.basic.double
+dEQP-VK.pipeline.spec_constant.compute.data_size.bool
+dEQP-VK.pipeline.spec_constant.compute.data_size.int
+dEQP-VK.pipeline.spec_constant.compute.data_size.uint
+dEQP-VK.pipeline.spec_constant.compute.builtin.default
+dEQP-VK.pipeline.spec_constant.compute.builtin.specialized
+dEQP-VK.pipeline.spec_constant.compute.expression.spec_const_expression
+dEQP-VK.pipeline.spec_constant.compute.expression.array_size
+dEQP-VK.pipeline.spec_constant.compute.expression.array_size_expression
+dEQP-VK.pipeline.spec_constant.compute.expression.array_size_spec_const_expression
+dEQP-VK.pipeline.spec_constant.compute.expression.array_size_length
+dEQP-VK.pipeline.spec_constant.compute.expression.array_size_pass_to_function
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.vec2
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.vec3
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.vec4
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.dvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.dvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.dvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.bvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.bvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.bvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.ivec2
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.ivec3
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.ivec4
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.uvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.uvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.vector.uvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat2
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat3
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.mat4
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat2
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat3
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.matrix.dmat4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.float
+dEQP-VK.pipeline.spec_constant.compute.composite.array.vec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.vec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.vec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.mat4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.double
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.dmat4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.int
+dEQP-VK.pipeline.spec_constant.compute.composite.array.ivec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.ivec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.ivec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.uint
+dEQP-VK.pipeline.spec_constant.compute.composite.array.uvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.uvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.uvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.bool
+dEQP-VK.pipeline.spec_constant.compute.composite.array.bvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.bvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.bvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_float
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_vec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_vec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_vec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_mat4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_double
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_dmat4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_int
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_ivec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_ivec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_ivec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_uint
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_uvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_uvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_uvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_bool
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_bvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_bvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.array.array_bvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.array.struct
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.float
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.vec2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.vec3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.vec4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.mat4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.double
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat2x3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat2x4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat3x2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat3x4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat4x2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat4x3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.dmat4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.int
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.ivec2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.ivec3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.ivec4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.uint
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.uvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.uvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.uvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.bool
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.bvec2
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.bvec3
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.bvec4
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.array
+dEQP-VK.pipeline.spec_constant.compute.composite.struct.struct
+dEQP-VK.pipeline.spec_constant.compute.local_size.x
+dEQP-VK.pipeline.spec_constant.compute.local_size.y
+dEQP-VK.pipeline.spec_constant.compute.local_size.z
+dEQP-VK.pipeline.spec_constant.compute.local_size.xy
+dEQP-VK.pipeline.spec_constant.compute.local_size.xz
+dEQP-VK.pipeline.spec_constant.compute.local_size.yz
+dEQP-VK.pipeline.spec_constant.compute.local_size.xyz
dEQP-VK.pipeline.multisample.raster_samples.samples_2.primitive_triangle
dEQP-VK.pipeline.multisample.raster_samples.samples_2.primitive_line
dEQP-VK.pipeline.multisample.raster_samples.samples_2.primitive_point
diff --git a/framework/opengl/gluShaderUtil.cpp b/framework/opengl/gluShaderUtil.cpp
index b9d8f7cc7..5d0ac984d 100644
--- a/framework/opengl/gluShaderUtil.cpp
+++ b/framework/opengl/gluShaderUtil.cpp
@@ -141,7 +141,7 @@ GLSLVersion getContextTypeGLSLVersion (ContextType type)
const char* getShaderTypeName (ShaderType shaderType)
{
- const char* s_names[] =
+ static const char* s_names[] =
{
"vertex",
"fragment",
@@ -160,7 +160,7 @@ const char* getShaderTypeName (ShaderType shaderType)
const char* getPrecisionName (Precision precision)
{
- const char* s_names[] =
+ static const char* s_names[] =
{
"lowp",
"mediump",
@@ -176,7 +176,7 @@ const char* getPrecisionName (Precision precision)
const char* getDataTypeName (DataType dataType)
{
- const char* s_names[] =
+ static const char* s_names[] =
{
"invalid",
"float",
@@ -192,6 +192,19 @@ const char* getDataTypeName (DataType dataType)
"mat4x2",
"mat4x3",
"mat4",
+ "double",
+ "dvec2",
+ "dvec3",
+ "dvec4",
+ "dmat2",
+ "dmat2x3",
+ "dmat2x4",
+ "dmat3x2",
+ "dmat3",
+ "dmat3x4",
+ "dmat4x2",
+ "dmat4x3",
+ "dmat4",
"int",
"ivec2",
"ivec3",
@@ -264,7 +277,7 @@ const char* getDataTypeName (DataType dataType)
int getDataTypeScalarSize (DataType dataType)
{
- const int s_sizes[] =
+ static const int s_sizes[] =
{
-1, // invalid
1, // float
@@ -280,6 +293,19 @@ int getDataTypeScalarSize (DataType dataType)
8, // mat4x2
12, // mat4x3
16, // mat4
+ 1, // double
+ 2, // dvec2
+ 3, // dvec3
+ 4, // dvec4
+ 4, // dmat2
+ 6, // dmat2x3
+ 8, // dmat2x4
+ 6, // dmat3x2
+ 9, // dmat3
+ 12, // dmat3x4
+ 8, // dmat4x2
+ 12, // dmat4x3
+ 16, // dmat4
1, // int
2, // ivec2
3, // ivec3
@@ -352,7 +378,7 @@ int getDataTypeScalarSize (DataType dataType)
DataType getDataTypeScalarType (DataType dataType)
{
- const DataType s_scalarTypes[] =
+ static const DataType s_scalarTypes[] =
{
TYPE_INVALID, // invalid
TYPE_FLOAT, // float
@@ -368,6 +394,19 @@ DataType getDataTypeScalarType (DataType dataType)
TYPE_FLOAT, // mat4x2
TYPE_FLOAT, // mat4x3
TYPE_FLOAT, // mat4
+ TYPE_DOUBLE, // double
+ TYPE_DOUBLE, // dvec2
+ TYPE_DOUBLE, // dvec3
+ TYPE_DOUBLE, // dvec4
+ TYPE_DOUBLE, // dmat2
+ TYPE_DOUBLE, // dmat2x3
+ TYPE_DOUBLE, // dmat2x4
+ TYPE_DOUBLE, // dmat3x2
+ TYPE_DOUBLE, // dmat3
+ TYPE_DOUBLE, // dmat3x4
+ TYPE_DOUBLE, // dmat4x2
+ TYPE_DOUBLE, // dmat4x3
+ TYPE_DOUBLE, // dmat4
TYPE_INT, // int
TYPE_INT, // ivec2
TYPE_INT, // ivec3
@@ -440,7 +479,7 @@ DataType getDataTypeScalarType (DataType dataType)
DataType getDataTypeFloatScalars (DataType dataType)
{
- const DataType s_floatTypes[] =
+ static const DataType s_floatTypes[] =
{
TYPE_INVALID, // invalid
TYPE_FLOAT, // float
@@ -456,6 +495,19 @@ DataType getDataTypeFloatScalars (DataType dataType)
TYPE_FLOAT_MAT4X2, // mat4x2
TYPE_FLOAT_MAT4X3, // mat4x3
TYPE_FLOAT_MAT4, // mat4
+ TYPE_FLOAT, // double
+ TYPE_FLOAT_VEC2, // dvec2
+ TYPE_FLOAT_VEC3, // dvec3
+ TYPE_FLOAT_VEC4, // dvec4
+ TYPE_FLOAT_MAT2, // dmat2
+ TYPE_FLOAT_MAT2X3, // dmat2x3
+ TYPE_FLOAT_MAT2X4, // dmat2x4
+ TYPE_FLOAT_MAT3X2, // dmat3x2
+ TYPE_FLOAT_MAT3, // dmat3
+ TYPE_FLOAT_MAT3X4, // dmat3x4
+ TYPE_FLOAT_MAT4X2, // dmat4x2
+ TYPE_FLOAT_MAT4X3, // dmat4x3
+ TYPE_FLOAT_MAT4, // dmat4
TYPE_FLOAT, // int
TYPE_FLOAT_VEC2, // ivec2
TYPE_FLOAT_VEC3, // ivec3
@@ -526,12 +578,114 @@ DataType getDataTypeFloatScalars (DataType dataType)
return s_floatTypes[(int)dataType];
}
+DataType getDataTypeDoubleScalars (DataType dataType)
+{
+ static const DataType s_doubleTypes[] =
+ {
+ TYPE_INVALID, // invalid
+ TYPE_DOUBLE, // float
+ TYPE_DOUBLE_VEC2, // vec2
+ TYPE_DOUBLE_VEC3, // vec3
+ TYPE_DOUBLE_VEC4, // vec4
+ TYPE_DOUBLE_MAT2, // mat2
+ TYPE_DOUBLE_MAT2X3, // mat2x3
+ TYPE_DOUBLE_MAT2X4, // mat2x4
+ TYPE_DOUBLE_MAT3X2, // mat3x2
+ TYPE_DOUBLE_MAT3, // mat3
+ TYPE_DOUBLE_MAT3X4, // mat3x4
+ TYPE_DOUBLE_MAT4X2, // mat4x2
+ TYPE_DOUBLE_MAT4X3, // mat4x3
+ TYPE_DOUBLE_MAT4, // mat4
+ TYPE_DOUBLE, // double
+ TYPE_DOUBLE_VEC2, // dvec2
+ TYPE_DOUBLE_VEC3, // dvec3
+ TYPE_DOUBLE_VEC4, // dvec4
+ TYPE_DOUBLE_MAT2, // dmat2
+ TYPE_DOUBLE_MAT2X3, // dmat2x3
+ TYPE_DOUBLE_MAT2X4, // dmat2x4
+ TYPE_DOUBLE_MAT3X2, // dmat3x2
+ TYPE_DOUBLE_MAT3, // dmat3
+ TYPE_DOUBLE_MAT3X4, // dmat3x4
+ TYPE_DOUBLE_MAT4X2, // dmat4x2
+ TYPE_DOUBLE_MAT4X3, // dmat4x3
+ TYPE_DOUBLE_MAT4, // dmat4
+ TYPE_DOUBLE, // int
+ TYPE_DOUBLE_VEC2, // ivec2
+ TYPE_DOUBLE_VEC3, // ivec3
+ TYPE_DOUBLE_VEC4, // ivec4
+ TYPE_DOUBLE, // uint
+ TYPE_DOUBLE_VEC2, // uvec2
+ TYPE_DOUBLE_VEC3, // uvec3
+ TYPE_DOUBLE_VEC4, // uvec4
+ TYPE_DOUBLE, // bool
+ TYPE_DOUBLE_VEC2, // bvec2
+ TYPE_DOUBLE_VEC3, // bvec3
+ TYPE_DOUBLE_VEC4, // bvec4
+ TYPE_INVALID, // sampler1D
+ TYPE_INVALID, // sampler2D
+ TYPE_INVALID, // samplerCube
+ TYPE_INVALID, // sampler2DArray
+ TYPE_INVALID, // sampler3D
+ TYPE_INVALID, // samplerCubeArray
+ TYPE_INVALID, // sampler1DShadow
+ TYPE_INVALID, // sampler2DShadow
+ TYPE_INVALID, // samplerCubeShadow
+ TYPE_INVALID, // sampler2DArrayShadow
+ TYPE_INVALID, // samplerCubeArrayShadow
+ TYPE_INVALID, // isampler1D
+ TYPE_INVALID, // isampler2D
+ TYPE_INVALID, // isamplerCube
+ TYPE_INVALID, // isampler2DArray
+ TYPE_INVALID, // isampler3D
+ TYPE_INVALID, // isamplerCubeArray
+ TYPE_INVALID, // usampler1D
+ TYPE_INVALID, // usampler2D
+ TYPE_INVALID, // usamplerCube
+ TYPE_INVALID, // usampler2DArray
+ TYPE_INVALID, // usampler3D
+ TYPE_INVALID, // usamplerCubeArray
+ TYPE_INVALID, // sampler2DMS
+ TYPE_INVALID, // isampler2DMS
+ TYPE_INVALID, // usampler2DMS
+ TYPE_INVALID, // image2D
+ TYPE_INVALID, // imageCube
+ TYPE_INVALID, // image2DArray
+ TYPE_INVALID, // image3D
+ TYPE_INVALID, // imageCubeArray
+ TYPE_INVALID, // iimage2D
+ TYPE_INVALID, // iimageCube
+ TYPE_INVALID, // iimage2DArray
+ TYPE_INVALID, // iimage3D
+ TYPE_INVALID, // iimageCubeArray
+ TYPE_INVALID, // uimage2D
+ TYPE_INVALID, // uimageCube
+ TYPE_INVALID, // uimage2DArray
+ TYPE_INVALID, // uimage3D
+ TYPE_INVALID, // uimageCubeArray
+ TYPE_INVALID, // atomic_uint
+ TYPE_INVALID, // samplerBuffer
+ TYPE_INVALID, // isamplerBuffer
+ TYPE_INVALID, // usamplerBuffer
+ TYPE_INVALID, // sampler2DMSArray
+ TYPE_INVALID, // isampler2DMSArray
+ TYPE_INVALID, // usampler2DMSArray
+ TYPE_INVALID, // imageBuffer
+ TYPE_INVALID, // iimageBuffer
+ TYPE_INVALID, // uimageBuffer
+ };
+
+ DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(s_doubleTypes) == TYPE_LAST);
+ DE_ASSERT(deInBounds32((int)dataType, 0, DE_LENGTH_OF_ARRAY(s_doubleTypes)));
+ return s_doubleTypes[(int)dataType];
+}
+
DataType getDataTypeVector (DataType scalarType, int size)
{
DE_ASSERT(deInRange32(size, 1, 4));
switch (scalarType)
{
case TYPE_FLOAT:
+ case TYPE_DOUBLE:
case TYPE_INT:
case TYPE_UINT:
case TYPE_BOOL:
@@ -571,15 +725,24 @@ int getDataTypeMatrixNumRows (DataType dataType)
{
switch (dataType)
{
- case TYPE_FLOAT_MAT2: return 2;
- case TYPE_FLOAT_MAT2X3: return 3;
- case TYPE_FLOAT_MAT2X4: return 4;
- case TYPE_FLOAT_MAT3X2: return 2;
- case TYPE_FLOAT_MAT3: return 3;
- case TYPE_FLOAT_MAT3X4: return 4;
- case TYPE_FLOAT_MAT4X2: return 2;
- case TYPE_FLOAT_MAT4X3: return 3;
- case TYPE_FLOAT_MAT4: return 4;
+ case TYPE_FLOAT_MAT2: return 2;
+ case TYPE_FLOAT_MAT2X3: return 3;
+ case TYPE_FLOAT_MAT2X4: return 4;
+ case TYPE_FLOAT_MAT3X2: return 2;
+ case TYPE_FLOAT_MAT3: return 3;
+ case TYPE_FLOAT_MAT3X4: return 4;
+ case TYPE_FLOAT_MAT4X2: return 2;
+ case TYPE_FLOAT_MAT4X3: return 3;
+ case TYPE_FLOAT_MAT4: return 4;
+ case TYPE_DOUBLE_MAT2: return 2;
+ case TYPE_DOUBLE_MAT2X3: return 3;
+ case TYPE_DOUBLE_MAT2X4: return 4;
+ case TYPE_DOUBLE_MAT3X2: return 2;
+ case TYPE_DOUBLE_MAT3: return 3;
+ case TYPE_DOUBLE_MAT3X4: return 4;
+ case TYPE_DOUBLE_MAT4X2: return 2;
+ case TYPE_DOUBLE_MAT4X3: return 3;
+ case TYPE_DOUBLE_MAT4: return 4;
default:
DE_ASSERT(false);
return 0;
@@ -590,15 +753,24 @@ int getDataTypeMatrixNumColumns (DataType dataType)
{
switch (dataType)
{
- case TYPE_FLOAT_MAT2: return 2;
- case TYPE_FLOAT_MAT2X3: return 2;
- case TYPE_FLOAT_MAT2X4: return 2;
- case TYPE_FLOAT_MAT3X2: return 3;
- case TYPE_FLOAT_MAT3: return 3;
- case TYPE_FLOAT_MAT3X4: return 3;
- case TYPE_FLOAT_MAT4X2: return 4;
- case TYPE_FLOAT_MAT4X3: return 4;
- case TYPE_FLOAT_MAT4: return 4;
+ case TYPE_FLOAT_MAT2: return 2;
+ case TYPE_FLOAT_MAT2X3: return 2;
+ case TYPE_FLOAT_MAT2X4: return 2;
+ case TYPE_FLOAT_MAT3X2: return 3;
+ case TYPE_FLOAT_MAT3: return 3;
+ case TYPE_FLOAT_MAT3X4: return 3;
+ case TYPE_FLOAT_MAT4X2: return 4;
+ case TYPE_FLOAT_MAT4X3: return 4;
+ case TYPE_FLOAT_MAT4: return 4;
+ case TYPE_DOUBLE_MAT2: return 2;
+ case TYPE_DOUBLE_MAT2X3: return 2;
+ case TYPE_DOUBLE_MAT2X4: return 2;
+ case TYPE_DOUBLE_MAT3X2: return 3;
+ case TYPE_DOUBLE_MAT3: return 3;
+ case TYPE_DOUBLE_MAT3X4: return 3;
+ case TYPE_DOUBLE_MAT4X2: return 4;
+ case TYPE_DOUBLE_MAT4X3: return 4;
+ case TYPE_DOUBLE_MAT4: return 4;
default:
DE_ASSERT(false);
return 0;
@@ -648,6 +820,23 @@ DataType getDataTypeFromGLType (deUint32 glType)
case GL_FLOAT_MAT4x3: return TYPE_FLOAT_MAT4X3;
case GL_FLOAT_MAT4: return TYPE_FLOAT_MAT4;
+ case GL_DOUBLE: return TYPE_DOUBLE;
+ case GL_DOUBLE_VEC2: return TYPE_DOUBLE_VEC2;
+ case GL_DOUBLE_VEC3: return TYPE_DOUBLE_VEC3;
+ case GL_DOUBLE_VEC4: return TYPE_DOUBLE_VEC4;
+
+ case GL_DOUBLE_MAT2: return TYPE_DOUBLE_MAT2;
+ case GL_DOUBLE_MAT2x3: return TYPE_DOUBLE_MAT2X3;
+ case GL_DOUBLE_MAT2x4: return TYPE_DOUBLE_MAT2X4;
+
+ case GL_DOUBLE_MAT3x2: return TYPE_DOUBLE_MAT3X2;
+ case GL_DOUBLE_MAT3: return TYPE_DOUBLE_MAT3;
+ case GL_DOUBLE_MAT3x4: return TYPE_DOUBLE_MAT3X4;
+
+ case GL_DOUBLE_MAT4x2: return TYPE_DOUBLE_MAT4X2;
+ case GL_DOUBLE_MAT4x3: return TYPE_DOUBLE_MAT4X3;
+ case GL_DOUBLE_MAT4: return TYPE_DOUBLE_MAT4;
+
case GL_INT: return TYPE_INT;
case GL_INT_VEC2: return TYPE_INT_VEC2;
case GL_INT_VEC3: return TYPE_INT_VEC3;
diff --git a/framework/opengl/gluShaderUtil.hpp b/framework/opengl/gluShaderUtil.hpp
index 9ebb745b9..45b8a1e44 100644
--- a/framework/opengl/gluShaderUtil.hpp
+++ b/framework/opengl/gluShaderUtil.hpp
@@ -112,6 +112,20 @@ enum DataType
TYPE_FLOAT_MAT4X3,
TYPE_FLOAT_MAT4,
+ TYPE_DOUBLE,
+ TYPE_DOUBLE_VEC2,
+ TYPE_DOUBLE_VEC3,
+ TYPE_DOUBLE_VEC4,
+ TYPE_DOUBLE_MAT2,
+ TYPE_DOUBLE_MAT2X3,
+ TYPE_DOUBLE_MAT2X4,
+ TYPE_DOUBLE_MAT3X2,
+ TYPE_DOUBLE_MAT3,
+ TYPE_DOUBLE_MAT3X4,
+ TYPE_DOUBLE_MAT4X2,
+ TYPE_DOUBLE_MAT4X3,
+ TYPE_DOUBLE_MAT4,
+
TYPE_INT,
TYPE_INT_VEC2,
TYPE_INT_VEC3,
@@ -197,6 +211,7 @@ const char* getDataTypeName (DataType dataType);
int getDataTypeScalarSize (DataType dataType);
DataType getDataTypeScalarType (DataType dataType);
DataType getDataTypeFloatScalars (DataType dataType);
+DataType getDataTypeDoubleScalars (DataType dataType);
DataType getDataTypeVector (DataType scalarType, int size);
DataType getDataTypeFloatVec (int vecSize);
DataType getDataTypeIntVec (int vecSize);
@@ -206,13 +221,14 @@ DataType getDataTypeMatrix (int numCols, int numRows);
DataType getDataTypeFromGLType (deUint32 glType);
inline bool isDataTypeFloatOrVec (DataType dataType) { return (dataType >= TYPE_FLOAT) && (dataType <= TYPE_FLOAT_VEC4); }
-inline bool isDataTypeMatrix (DataType dataType) { return (dataType >= TYPE_FLOAT_MAT2) && (dataType <= TYPE_FLOAT_MAT4); }
+inline bool isDataTypeDoubleOrDVec (DataType dataType) { return (dataType >= TYPE_DOUBLE) && (dataType <= TYPE_DOUBLE_VEC4); }
+inline bool isDataTypeMatrix (DataType dataType) { return ((dataType >= TYPE_FLOAT_MAT2) && (dataType <= TYPE_FLOAT_MAT4)) || ((dataType >= TYPE_DOUBLE_MAT2) && (dataType <= TYPE_DOUBLE_MAT4)); }
inline bool isDataTypeIntOrIVec (DataType dataType) { return (dataType >= TYPE_INT) && (dataType <= TYPE_INT_VEC4); }
-inline bool isDataTypeUintOrUVec (DataType dataType) { return (dataType >= TYPE_UINT) && (dataType <= TYPE_UINT_VEC4); }
+inline bool isDataTypeUintOrUVec (DataType dataType) { return (dataType >= TYPE_UINT) && (dataType <= TYPE_UINT_VEC4); }
inline bool isDataTypeBoolOrBVec (DataType dataType) { return (dataType >= TYPE_BOOL) && (dataType <= TYPE_BOOL_VEC4); }
-inline bool isDataTypeScalar (DataType dataType) { return (dataType == TYPE_FLOAT) || (dataType == TYPE_INT) || (dataType == TYPE_UINT) || (dataType == TYPE_BOOL); }
-inline bool isDataTypeVector (DataType dataType) { return deInRange32(dataType, TYPE_FLOAT_VEC2, TYPE_FLOAT_VEC4) || deInRange32(dataType, TYPE_INT_VEC2, TYPE_INT_VEC4) || deInRange32(dataType, TYPE_UINT_VEC2, TYPE_UINT_VEC4) || deInRange32(dataType, TYPE_BOOL_VEC2, TYPE_BOOL_VEC4); }
-inline bool isDataTypeScalarOrVector (DataType dataType) { return deInRange32(dataType, TYPE_FLOAT, TYPE_FLOAT_VEC4) || deInRange32(dataType, TYPE_INT, TYPE_INT_VEC4) || deInRange32(dataType, TYPE_UINT, TYPE_UINT_VEC4) || deInRange32(dataType, TYPE_BOOL, TYPE_BOOL_VEC4); }
+inline bool isDataTypeScalar (DataType dataType) { return (dataType == TYPE_FLOAT) || (dataType == TYPE_DOUBLE) ||(dataType == TYPE_INT) || (dataType == TYPE_UINT) || (dataType == TYPE_BOOL); }
+inline bool isDataTypeVector (DataType dataType) { return deInRange32(dataType, TYPE_FLOAT_VEC2, TYPE_FLOAT_VEC4) || deInRange32(dataType, TYPE_DOUBLE_VEC2, TYPE_DOUBLE_VEC4) || deInRange32(dataType, TYPE_INT_VEC2, TYPE_INT_VEC4) || deInRange32(dataType, TYPE_UINT_VEC2, TYPE_UINT_VEC4) || deInRange32(dataType, TYPE_BOOL_VEC2, TYPE_BOOL_VEC4); }
+inline bool isDataTypeScalarOrVector (DataType dataType) { return deInRange32(dataType, TYPE_FLOAT, TYPE_FLOAT_VEC4) || deInRange32(dataType, TYPE_DOUBLE, TYPE_DOUBLE_VEC4) || deInRange32(dataType, TYPE_INT, TYPE_INT_VEC4) || deInRange32(dataType, TYPE_UINT, TYPE_UINT_VEC4) || deInRange32(dataType, TYPE_BOOL, TYPE_BOOL_VEC4); }
inline bool isDataTypeSampler (DataType dataType) { return (dataType >= TYPE_SAMPLER_1D) && (dataType <= TYPE_UINT_SAMPLER_2D_MULTISAMPLE); }
inline bool isDataTypeImage (DataType dataType) { return (dataType >= TYPE_IMAGE_2D) && (dataType <= TYPE_UINT_IMAGE_3D); }
inline bool isDataTypeSamplerMultisample(DataType dataType) { return (dataType >= TYPE_SAMPLER_2D_MULTISAMPLE) && (dataType <= TYPE_UINT_SAMPLER_2D_MULTISAMPLE); }