Diffstat (limited to 'src/3rdparty/glslang/SPIRV')
-rw-r--r--  src/3rdparty/glslang/SPIRV/GLSL.ext.AMD.h      |  108
-rw-r--r--  src/3rdparty/glslang/SPIRV/GLSL.ext.EXT.h      |   38
-rw-r--r--  src/3rdparty/glslang/SPIRV/GLSL.ext.KHR.h      |   45
-rw-r--r--  src/3rdparty/glslang/SPIRV/GLSL.ext.NV.h       |   78
-rw-r--r--  src/3rdparty/glslang/SPIRV/GLSL.std.450.h      |  131
-rw-r--r--  src/3rdparty/glslang/SPIRV/GlslangToSpv.cpp    | 7980
-rw-r--r--  src/3rdparty/glslang/SPIRV/GlslangToSpv.h      |   61
-rw-r--r--  src/3rdparty/glslang/SPIRV/InReadableOrder.cpp |  113
-rw-r--r--  src/3rdparty/glslang/SPIRV/Logger.cpp          |   68
-rw-r--r--  src/3rdparty/glslang/SPIRV/Logger.h            |   74
-rw-r--r--  src/3rdparty/glslang/SPIRV/SPVRemapper.cpp     | 1487
-rw-r--r--  src/3rdparty/glslang/SPIRV/SPVRemapper.h       |  304
-rw-r--r--  src/3rdparty/glslang/SPIRV/SpvBuilder.cpp      | 3048
-rw-r--r--  src/3rdparty/glslang/SPIRV/SpvBuilder.h        |  749
-rw-r--r--  src/3rdparty/glslang/SPIRV/SpvPostProcess.cpp  |  389
-rw-r--r--  src/3rdparty/glslang/SPIRV/SpvTools.cpp        |  201
-rw-r--r--  src/3rdparty/glslang/SPIRV/SpvTools.h          |   80
-rw-r--r--  src/3rdparty/glslang/SPIRV/bitutils.h          |   81
-rw-r--r--  src/3rdparty/glslang/SPIRV/disassemble.cpp     |  759
-rw-r--r--  src/3rdparty/glslang/SPIRV/disassemble.h       |   53
-rw-r--r--  src/3rdparty/glslang/SPIRV/doc.cpp             | 2757
-rw-r--r--  src/3rdparty/glslang/SPIRV/doc.h               |  258
-rw-r--r--  src/3rdparty/glslang/SPIRV/hex_float.h         | 1078
-rw-r--r--  src/3rdparty/glslang/SPIRV/spirv.hpp           | 1343
-rw-r--r--  src/3rdparty/glslang/SPIRV/spvIR.h             |  441
25 files changed, 21724 insertions(+), 0 deletions(-)
diff --git a/src/3rdparty/glslang/SPIRV/GLSL.ext.AMD.h b/src/3rdparty/glslang/SPIRV/GLSL.ext.AMD.h
new file mode 100644
index 0000000..009d2f1
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GLSL.ext.AMD.h
@@ -0,0 +1,108 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextAMD_H
+#define GLSLextAMD_H
+
+static const int GLSLextAMDVersion = 100;
+static const int GLSLextAMDRevision = 7;
+
+// SPV_AMD_shader_ballot
+static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
+
+enum ShaderBallotAMD {
+ ShaderBallotBadAMD = 0, // Don't use
+
+ SwizzleInvocationsAMD = 1,
+ SwizzleInvocationsMaskedAMD = 2,
+ WriteInvocationAMD = 3,
+ MbcntAMD = 4,
+
+ ShaderBallotCountAMD
+};
+
+// SPV_AMD_shader_trinary_minmax
+static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax";
+
+enum ShaderTrinaryMinMaxAMD {
+ ShaderTrinaryMinMaxBadAMD = 0, // Don't use
+
+ FMin3AMD = 1,
+ UMin3AMD = 2,
+ SMin3AMD = 3,
+ FMax3AMD = 4,
+ UMax3AMD = 5,
+ SMax3AMD = 6,
+ FMid3AMD = 7,
+ UMid3AMD = 8,
+ SMid3AMD = 9,
+
+ ShaderTrinaryMinMaxCountAMD
+};
+
+// SPV_AMD_shader_explicit_vertex_parameter
+static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter";
+
+enum ShaderExplicitVertexParameterAMD {
+ ShaderExplicitVertexParameterBadAMD = 0, // Don't use
+
+ InterpolateAtVertexAMD = 1,
+
+ ShaderExplicitVertexParameterCountAMD
+};
+
+// SPV_AMD_gcn_shader
+static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader";
+
+enum GcnShaderAMD {
+ GcnShaderBadAMD = 0, // Don't use
+
+ CubeFaceIndexAMD = 1,
+ CubeFaceCoordAMD = 2,
+ TimeAMD = 3,
+
+ GcnShaderCountAMD
+};
+
+// SPV_AMD_gpu_shader_half_float
+static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
+
+// SPV_AMD_texture_gather_bias_lod
+static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod";
+
+// SPV_AMD_gpu_shader_int16
+static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16";
+
+// SPV_AMD_shader_image_load_store_lod
+static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod";
+
+// SPV_AMD_shader_fragment_mask
+static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask";
+
+// SPV_AMD_gpu_shader_half_float_fetch
+static const char* const E_SPV_AMD_gpu_shader_half_float_fetch = "SPV_AMD_gpu_shader_half_float_fetch";
+
+#endif // #ifndef GLSLextAMD_H
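
The AMD enumerants above are instruction numbers inside extended-instruction sets, which a SPIR-V module imports by the same string used as the extension name. A minimal sketch of how GlslangToSpv.cpp consumes them through spv::Builder (import and createBuiltinCall are declared in SpvBuilder.h; builder, typeId, data, and offset are illustrative names from the surrounding context):

    // Import the SPV_AMD_shader_ballot instruction set once (OpExtInstImport),
    // then emit one of its entries as an OpExtInst.
    spv::Id extSet = builder.import(spv::E_SPV_AMD_shader_ballot);
    std::vector<spv::Id> args = { data, offset };
    spv::Id result = builder.createBuiltinCall(typeId, extSet, spv::SwizzleInvocationsAMD, args);
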
diff --git a/src/3rdparty/glslang/SPIRV/GLSL.ext.EXT.h b/src/3rdparty/glslang/SPIRV/GLSL.ext.EXT.h
new file mode 100644
index 0000000..e29c055
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GLSL.ext.EXT.h
@@ -0,0 +1,38 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextEXT_H
+#define GLSLextEXT_H
+
+static const int GLSLextEXTVersion = 100;
+static const int GLSLextEXTRevision = 2;
+
+static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export";
+static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
+static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
+static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
+
+#endif // #ifndef GLSLextEXT_H
diff --git a/src/3rdparty/glslang/SPIRV/GLSL.ext.KHR.h b/src/3rdparty/glslang/SPIRV/GLSL.ext.KHR.h
new file mode 100644
index 0000000..333442b
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GLSL.ext.KHR.h
@@ -0,0 +1,45 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextKHR_H
+#define GLSLextKHR_H
+
+static const int GLSLextKHRVersion = 100;
+static const int GLSLextKHRRevision = 2;
+
+static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot";
+static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote";
+static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group";
+static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview";
+static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters";
+static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage";
+static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
+static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
+static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
+static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
+static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
+
+#endif // #ifndef GLSLextKHR_H
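
These strings are what the translator passes to the builder when a KHR feature is used, always paired with the matching capability. A sketch of the pattern (the exact calls appear in TranslateBuiltInDecoration further down, for the gl_SubGroup* built-ins):

    // Declare the extension (OpExtension) and its capability (OpCapability)
    // on the module before emitting instructions that rely on them.
    builder.addExtension(spv::E_SPV_KHR_shader_ballot);
    builder.addCapability(spv::CapabilitySubgroupBallotKHR);
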
diff --git a/src/3rdparty/glslang/SPIRV/GLSL.ext.NV.h b/src/3rdparty/glslang/SPIRV/GLSL.ext.NV.h
new file mode 100644
index 0000000..ede2c57
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GLSL.ext.NV.h
@@ -0,0 +1,78 @@
+/*
+** Copyright (c) 2014-2017 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextNV_H
+#define GLSLextNV_H
+
+enum BuiltIn;
+enum Decoration;
+enum Op;
+enum Capability;
+
+static const int GLSLextNVVersion = 100;
+static const int GLSLextNVRevision = 11;
+
+//SPV_NV_sample_mask_override_coverage
+const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage";
+
+//SPV_NV_geometry_shader_passthrough
+const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough";
+
+//SPV_NV_viewport_array2
+const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2";
+const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array";
+
+//SPV_NV_stereo_view_rendering
+const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering";
+
+//SPV_NVX_multiview_per_view_attributes
+const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes";
+
+//SPV_NV_shader_subgroup_partitioned
+const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned";
+
+//SPV_NV_fragment_shader_barycentric
+const char* const E_SPV_NV_fragment_shader_barycentric = "SPV_NV_fragment_shader_barycentric";
+
+//SPV_NV_compute_shader_derivatives
+const char* const E_SPV_NV_compute_shader_derivatives = "SPV_NV_compute_shader_derivatives";
+
+//SPV_NV_shader_image_footprint
+const char* const E_SPV_NV_shader_image_footprint = "SPV_NV_shader_image_footprint";
+
+//SPV_NV_mesh_shader
+const char* const E_SPV_NV_mesh_shader = "SPV_NV_mesh_shader";
+
+//SPV_NV_ray_tracing
+const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing";
+
+//SPV_NV_shading_rate
+const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
+
+//SPV_NV_cooperative_matrix
+const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
+
+#endif // #ifndef GLSLextNV_H
diff --git a/src/3rdparty/glslang/SPIRV/GLSL.std.450.h b/src/3rdparty/glslang/SPIRV/GLSL.std.450.h
new file mode 100644
index 0000000..df31092
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GLSL.std.450.h
@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 1;
+
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47, // Reserved
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450NMin = 79,
+ GLSLstd450NMax = 80,
+ GLSLstd450NClamp = 81,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
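
Every entry above is the instruction number of an OpExtInst into the "GLSL.std.450" set, which the traverser imports once and caches in stdBuiltins. A minimal sketch with floatType and x as illustrative ids; per the comment on GLSLstd450Modf, its second operand must be a pointer, so the sketch creates a Function-storage OpVariable to receive the whole part:

    // Import the standard set once, then call Modf: the result is the
    // fractional part; 'whole' receives the integral part through the pointer.
    spv::Id std450 = builder.import("GLSL.std.450");
    spv::Id whole = builder.createVariable(spv::StorageClassFunction, floatType, "whole");
    std::vector<spv::Id> args = { x, whole };
    spv::Id fracPart = builder.createBuiltinCall(floatType, std450, spv::GLSLstd450Modf, args);
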
diff --git a/src/3rdparty/glslang/SPIRV/GlslangToSpv.cpp b/src/3rdparty/glslang/SPIRV/GlslangToSpv.cpp
new file mode 100644
index 0000000..25ef210
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GlslangToSpv.cpp
@@ -0,0 +1,7980 @@
+//
+// Copyright (C) 2014-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// translate them to SPIR-V.
+//
+
+#include "spirv.hpp"
+#include "GlslangToSpv.h"
+#include "SpvBuilder.h"
+namespace spv {
+ #include "GLSL.std.450.h"
+ #include "GLSL.ext.KHR.h"
+ #include "GLSL.ext.EXT.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+ #include "GLSL.ext.NV.h"
+}
+
+// Glslang includes
+#include "../glslang/MachineIndependent/localintermediate.h"
+#include "../glslang/MachineIndependent/SymbolTable.h"
+#include "../glslang/Include/Common.h"
+#include "../glslang/Include/revision.h"
+
+#include <fstream>
+#include <iomanip>
+#include <list>
+#include <map>
+#include <stack>
+#include <string>
+#include <vector>
+
+namespace {
+
+namespace {
+class SpecConstantOpModeGuard {
+public:
+ SpecConstantOpModeGuard(spv::Builder* builder)
+ : builder_(builder) {
+ previous_flag_ = builder->isInSpecConstCodeGenMode();
+ }
+ ~SpecConstantOpModeGuard() {
+ previous_flag_ ? builder_->setToSpecConstCodeGenMode()
+ : builder_->setToNormalCodeGenMode();
+ }
+ void turnOnSpecConstantOpMode() {
+ builder_->setToSpecConstCodeGenMode();
+ }
+
+private:
+ spv::Builder* builder_;
+ bool previous_flag_;
+};
+
+struct OpDecorations {
+ spv::Decoration precision;
+ spv::Decoration noContraction;
+ spv::Decoration nonUniform;
+};
+
+} // namespace
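
The guard is used RAII-style by the visit methods further down: the constructor snapshots the builder's current mode and the destructor restores it, so spec-constant mode only ever needs to be switched on explicitly. A sketch of the pattern (node and builder come from the enclosing traverser):

    {
        SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
        if (node->getType().getQualifier().isSpecConstant())
            spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
        // ... emit operations; they become OpSpecConstantOp while the mode is on
    } // destructor restores the previous mode here
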
+
+//
+// The main holder of information for translating glslang to SPIR-V.
+//
+// Derives from the AST walking base class.
+//
+class TGlslangToSpvTraverser : public glslang::TIntermTraverser {
+public:
+ TGlslangToSpvTraverser(unsigned int spvVersion, const glslang::TIntermediate*, spv::SpvBuildLogger* logger,
+ glslang::SpvOptions& options);
+ virtual ~TGlslangToSpvTraverser() { }
+
+ bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*);
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary*);
+ void visitConstantUnion(glslang::TIntermConstantUnion*);
+ bool visitSelection(glslang::TVisit, glslang::TIntermSelection*);
+ bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch*);
+ void visitSymbol(glslang::TIntermSymbol* symbol);
+ bool visitUnary(glslang::TVisit, glslang::TIntermUnary*);
+ bool visitLoop(glslang::TVisit, glslang::TIntermLoop*);
+ bool visitBranch(glslang::TVisit visit, glslang::TIntermBranch*);
+
+ void finishSpv();
+ void dumpSpv(std::vector<unsigned int>& out);
+
+protected:
+ TGlslangToSpvTraverser(TGlslangToSpvTraverser&);
+ TGlslangToSpvTraverser& operator=(TGlslangToSpvTraverser&);
+
+ spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
+ spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
+ spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier);
+ spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type);
+ spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
+ spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
+ spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
+ spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration);
+ spv::ImageFormat TranslateImageFormat(const glslang::TType& type);
+ spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const;
+ spv::SelectionControlMask TranslateSwitchControl(const glslang::TIntermSwitch&) const;
+ spv::LoopControlMask TranslateLoopControl(const glslang::TIntermLoop&, unsigned int& dependencyLength) const;
+ spv::StorageClass TranslateStorageClass(const glslang::TType&);
+ void addIndirectionIndexCapabilities(const glslang::TType& baseType, const glslang::TType& indexType);
+ spv::Id createSpvVariable(const glslang::TIntermSymbol*);
+ spv::Id getSampledType(const glslang::TSampler&);
+ spv::Id getInvertedSwizzleType(const glslang::TIntermTyped&);
+ spv::Id createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped&, spv::Id parentResult);
+ void convertSwizzle(const glslang::TIntermAggregate&, std::vector<unsigned>& swizzle);
+ spv::Id convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly = false);
+ spv::Id convertGlslangToSpvType(const glslang::TType& type, glslang::TLayoutPacking, const glslang::TQualifier&,
+ bool lastBufferBlockMember, bool forwardReferenceOnly = false);
+ bool filterMember(const glslang::TType& member);
+ spv::Id convertGlslangStructToSpvType(const glslang::TType&, const glslang::TTypeList* glslangStruct,
+ glslang::TLayoutPacking, const glslang::TQualifier&);
+ void decorateStructType(const glslang::TType&, const glslang::TTypeList* glslangStruct, glslang::TLayoutPacking,
+ const glslang::TQualifier&, spv::Id);
+ spv::Id makeArraySizeId(const glslang::TArraySizes&, int dim);
+ spv::Id accessChainLoad(const glslang::TType& type);
+ void accessChainStore(const glslang::TType& type, spv::Id rvalue);
+ void multiTypeStore(const glslang::TType&, spv::Id rValue);
+ glslang::TLayoutPacking getExplicitLayout(const glslang::TType& type) const;
+ int getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+ int getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+ void updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset,
+ int& nextOffset, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+ void declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember);
+
+ bool isShaderEntryPoint(const glslang::TIntermAggregate* node);
+ bool writableParam(glslang::TStorageQualifier) const;
+ bool originalParam(glslang::TStorageQualifier, const glslang::TType&, bool implicitThisParam);
+ void makeFunctions(const glslang::TIntermSequence&);
+ void makeGlobalInitializers(const glslang::TIntermSequence&);
+ void visitFunctions(const glslang::TIntermSequence&);
+ void handleFunctionEntry(const glslang::TIntermAggregate* node);
+ void translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments);
+ void translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments);
+ spv::Id createImageTextureFunctionCall(glslang::TIntermOperator* node);
+ spv::Id handleUserFunctionCall(const glslang::TIntermAggregate*);
+
+ spv::Id createBinaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right,
+ glslang::TBasicType typeProxy, bool reduceComparison = true);
+ spv::Id createBinaryMatrixOperation(spv::Op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right);
+ spv::Id createUnaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id operand,
+ glslang::TBasicType typeProxy);
+ spv::Id createUnaryMatrixOperation(spv::Op op, OpDecorations&, spv::Id typeId, spv::Id operand,
+ glslang::TBasicType typeProxy);
+ spv::Id createConversion(glslang::TOperator op, OpDecorations&, spv::Id destTypeId, spv::Id operand,
+ glslang::TBasicType typeProxy);
+ spv::Id createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize);
+ spv::Id makeSmearedConstant(spv::Id constant, int vectorSize);
+ spv::Id createAtomicOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation, spv::Id typeId, std::vector<spv::Id>& operands);
+ spv::Id createSubgroupOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId);
+ spv::Id getSymbolId(const glslang::TIntermSymbol* node);
+#ifdef NV_EXTENSIONS
+ void addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier & qualifier);
+#endif
+ spv::Id createSpvConstant(const glslang::TIntermTyped&);
+ spv::Id createSpvConstantFromConstUnionArray(const glslang::TType& type, const glslang::TConstUnionArray&, int& nextConst, bool specConstant);
+ bool isTrivialLeaf(const glslang::TIntermTyped* node);
+ bool isTrivial(const glslang::TIntermTyped* node);
+ spv::Id createShortCircuit(glslang::TOperator, glslang::TIntermTyped& left, glslang::TIntermTyped& right);
+#ifdef AMD_EXTENSIONS
+ spv::Id getExtBuiltins(const char* name);
+#endif
+ void addPre13Extension(const char* ext)
+ {
+ if (builder.getSpvVersion() < glslang::EShTargetSpv_1_3)
+ builder.addExtension(ext);
+ }
+
+ glslang::SpvOptions& options;
+ spv::Function* shaderEntry;
+ spv::Function* currentFunction;
+ spv::Instruction* entryPoint;
+ int sequenceDepth;
+
+ spv::SpvBuildLogger* logger;
+
+ // There is a 1:1 mapping between a spv builder and a module; this is thread safe
+ spv::Builder builder;
+ bool inEntryPoint;
+ bool entryPointTerminated;
+ bool linkageOnly; // true when visiting the set of objects in the AST present only for establishing interface, whether or not they were statically used
+ std::set<spv::Id> iOSet; // all input/output variables from either static use or declaration of interface
+ const glslang::TIntermediate* glslangIntermediate;
+ spv::Id stdBuiltins;
+ std::unordered_map<const char*, spv::Id> extBuiltinMap;
+
+ std::unordered_map<int, spv::Id> symbolValues;
+ std::unordered_set<int> rValueParameters; // set of formal function parameters passed as rValues, rather than a pointer
+ std::unordered_map<std::string, spv::Function*> functionMap;
+ std::unordered_map<const glslang::TTypeList*, spv::Id> structMap[glslang::ElpCount][glslang::ElmCount];
+ // for mapping glslang block indices to spv indices (e.g., due to hidden members):
+ std::unordered_map<const glslang::TTypeList*, std::vector<int> > memberRemapper;
+ std::stack<bool> breakForLoop; // false means break for switch
+ std::unordered_map<std::string, const glslang::TIntermSymbol*> counterOriginator;
+ // Map pointee types for EbtReference to their forward pointers
+ std::map<const glslang::TType *, spv::Id> forwardPointers;
+};
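
The traverser is private to this file; clients drive it through the entry points declared in GlslangToSpv.h (listed in the diffstat above). A minimal sketch of the public flow, assuming an already parsed and linked glslang::TProgram named program:

    // Translate one stage's intermediate representation to a SPIR-V word stream.
    std::vector<unsigned int> spirv;
    spv::SpvBuildLogger logger;
    glslang::SpvOptions options;
    options.disableOptimizer = true; // SpvOptions field; illustrative setting
    glslang::GlslangToSpv(*program.getIntermediate(EShLangFragment), spirv, &logger, &options);
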
+
+//
+// Helper functions for translating glslang representations to SPIR-V enumerants.
+//
+
+// Translate glslang profile to SPIR-V source language.
+spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile)
+{
+ switch (source) {
+ case glslang::EShSourceGlsl:
+ switch (profile) {
+ case ENoProfile:
+ case ECoreProfile:
+ case ECompatibilityProfile:
+ return spv::SourceLanguageGLSL;
+ case EEsProfile:
+ return spv::SourceLanguageESSL;
+ default:
+ return spv::SourceLanguageUnknown;
+ }
+ case glslang::EShSourceHlsl:
+ return spv::SourceLanguageHLSL;
+ default:
+ return spv::SourceLanguageUnknown;
+ }
+}
+
+// Translate glslang language (stage) to SPIR-V execution model.
+spv::ExecutionModel TranslateExecutionModel(EShLanguage stage)
+{
+ switch (stage) {
+ case EShLangVertex: return spv::ExecutionModelVertex;
+ case EShLangTessControl: return spv::ExecutionModelTessellationControl;
+ case EShLangTessEvaluation: return spv::ExecutionModelTessellationEvaluation;
+ case EShLangGeometry: return spv::ExecutionModelGeometry;
+ case EShLangFragment: return spv::ExecutionModelFragment;
+ case EShLangCompute: return spv::ExecutionModelGLCompute;
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV: return spv::ExecutionModelRayGenerationNV;
+ case EShLangIntersectNV: return spv::ExecutionModelIntersectionNV;
+ case EShLangAnyHitNV: return spv::ExecutionModelAnyHitNV;
+ case EShLangClosestHitNV: return spv::ExecutionModelClosestHitNV;
+ case EShLangMissNV: return spv::ExecutionModelMissNV;
+ case EShLangCallableNV: return spv::ExecutionModelCallableNV;
+ case EShLangTaskNV: return spv::ExecutionModelTaskNV;
+ case EShLangMeshNV: return spv::ExecutionModelMeshNV;
+#endif
+ default:
+ assert(0);
+ return spv::ExecutionModelFragment;
+ }
+}
+
+// Translate glslang sampler type to SPIR-V dimensionality.
+spv::Dim TranslateDimensionality(const glslang::TSampler& sampler)
+{
+ switch (sampler.dim) {
+ case glslang::Esd1D: return spv::Dim1D;
+ case glslang::Esd2D: return spv::Dim2D;
+ case glslang::Esd3D: return spv::Dim3D;
+ case glslang::EsdCube: return spv::DimCube;
+ case glslang::EsdRect: return spv::DimRect;
+ case glslang::EsdBuffer: return spv::DimBuffer;
+ case glslang::EsdSubpass: return spv::DimSubpassData;
+ default:
+ assert(0);
+ return spv::Dim2D;
+ }
+}
+
+// Translate glslang precision to SPIR-V precision decorations.
+spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision)
+{
+ switch (glslangPrecision) {
+ case glslang::EpqLow: return spv::DecorationRelaxedPrecision;
+ case glslang::EpqMedium: return spv::DecorationRelaxedPrecision;
+ default:
+ return spv::NoPrecision;
+ }
+}
+
+// Translate glslang type to SPIR-V precision decorations.
+spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type)
+{
+ return TranslatePrecisionDecoration(type.getQualifier().precision);
+}
+
+// Translate glslang type to SPIR-V block decorations.
+spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useStorageBuffer)
+{
+ if (type.getBasicType() == glslang::EbtBlock) {
+ switch (type.getQualifier().storage) {
+ case glslang::EvqUniform: return spv::DecorationBlock;
+ case glslang::EvqBuffer: return useStorageBuffer ? spv::DecorationBlock : spv::DecorationBufferBlock;
+ case glslang::EvqVaryingIn: return spv::DecorationBlock;
+ case glslang::EvqVaryingOut: return spv::DecorationBlock;
+#ifdef NV_EXTENSIONS
+ case glslang::EvqPayloadNV: return spv::DecorationBlock;
+ case glslang::EvqPayloadInNV: return spv::DecorationBlock;
+ case glslang::EvqHitAttrNV: return spv::DecorationBlock;
+ case glslang::EvqCallableDataNV: return spv::DecorationBlock;
+ case glslang::EvqCallableDataInNV: return spv::DecorationBlock;
+#endif
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ return spv::DecorationMax;
+}
+
+// Translate glslang type to SPIR-V memory decorations.
+void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory, bool useVulkanMemoryModel)
+{
+ if (!useVulkanMemoryModel) {
+ if (qualifier.coherent)
+ memory.push_back(spv::DecorationCoherent);
+ if (qualifier.volatil) {
+ memory.push_back(spv::DecorationVolatile);
+ memory.push_back(spv::DecorationCoherent);
+ }
+ }
+ if (qualifier.restrict)
+ memory.push_back(spv::DecorationRestrict);
+ if (qualifier.readonly)
+ memory.push_back(spv::DecorationNonWritable);
+ if (qualifier.writeonly)
+ memory.push_back(spv::DecorationNonReadable);
+}
+
+// Translate glslang type to SPIR-V layout decorations.
+spv::Decoration TranslateLayoutDecoration(const glslang::TType& type, glslang::TLayoutMatrix matrixLayout)
+{
+ if (type.isMatrix()) {
+ switch (matrixLayout) {
+ case glslang::ElmRowMajor:
+ return spv::DecorationRowMajor;
+ case glslang::ElmColumnMajor:
+ return spv::DecorationColMajor;
+ default:
+ // opaque layouts don't need a majorness
+ return spv::DecorationMax;
+ }
+ } else {
+ switch (type.getBasicType()) {
+ default:
+ return spv::DecorationMax;
+ break;
+ case glslang::EbtBlock:
+ switch (type.getQualifier().storage) {
+ case glslang::EvqUniform:
+ case glslang::EvqBuffer:
+ switch (type.getQualifier().layoutPacking) {
+ case glslang::ElpShared: return spv::DecorationGLSLShared;
+ case glslang::ElpPacked: return spv::DecorationGLSLPacked;
+ default:
+ return spv::DecorationMax;
+ }
+ case glslang::EvqVaryingIn:
+ case glslang::EvqVaryingOut:
+ if (type.getQualifier().isTaskMemory()) {
+ switch (type.getQualifier().layoutPacking) {
+ case glslang::ElpShared: return spv::DecorationGLSLShared;
+ case glslang::ElpPacked: return spv::DecorationGLSLPacked;
+ default: break;
+ }
+ } else {
+ assert(type.getQualifier().layoutPacking == glslang::ElpNone);
+ }
+ return spv::DecorationMax;
+#ifdef NV_EXTENSIONS
+ case glslang::EvqPayloadNV:
+ case glslang::EvqPayloadInNV:
+ case glslang::EvqHitAttrNV:
+ case glslang::EvqCallableDataNV:
+ case glslang::EvqCallableDataInNV:
+ return spv::DecorationMax;
+#endif
+ default:
+ assert(0);
+ return spv::DecorationMax;
+ }
+ }
+ }
+}
+
+// Translate glslang type to SPIR-V interpolation decorations.
+// Returns spv::DecorationMax when no decoration
+// should be applied.
+spv::Decoration TGlslangToSpvTraverser::TranslateInterpolationDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.smooth)
+ // Smooth decoration doesn't exist in SPIR-V 1.0
+ return spv::DecorationMax;
+ else if (qualifier.nopersp)
+ return spv::DecorationNoPerspective;
+ else if (qualifier.flat)
+ return spv::DecorationFlat;
+#ifdef AMD_EXTENSIONS
+ else if (qualifier.explicitInterp) {
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::DecorationExplicitInterpAMD;
+ }
+#endif
+ else
+ return spv::DecorationMax;
+}
+
+// Translate glslang type to SPIR-V auxiliary storage decorations.
+// Returns spv::DecorationMax when no decoration
+// should be applied.
+spv::Decoration TGlslangToSpvTraverser::TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.patch)
+ return spv::DecorationPatch;
+ else if (qualifier.centroid)
+ return spv::DecorationCentroid;
+ else if (qualifier.sample) {
+ builder.addCapability(spv::CapabilitySampleRateShading);
+ return spv::DecorationSample;
+ } else
+ return spv::DecorationMax;
+}
+
+// If glslang type is invariant, return SPIR-V invariant decoration.
+spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.invariant)
+ return spv::DecorationInvariant;
+ else
+ return spv::DecorationMax;
+}
+
+// If glslang type is noContraction, return SPIR-V NoContraction decoration.
+spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.noContraction)
+ return spv::DecorationNoContraction;
+ else
+ return spv::DecorationMax;
+}
+
+// If glslang type is nonUniform, return SPIR-V NonUniform decoration.
+spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.isNonUniform()) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityShaderNonUniformEXT);
+ return spv::DecorationNonUniformEXT;
+ } else
+ return spv::DecorationMax;
+}
+
+spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
+{
+ if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) {
+ return spv::MemoryAccessMaskNone;
+ }
+ spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone;
+ if (coherentFlags.volatil ||
+ coherentFlags.coherent ||
+ coherentFlags.devicecoherent ||
+ coherentFlags.queuefamilycoherent ||
+ coherentFlags.workgroupcoherent ||
+ coherentFlags.subgroupcoherent) {
+ mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask |
+ spv::MemoryAccessMakePointerVisibleKHRMask;
+ }
+ if (coherentFlags.nonprivate) {
+ mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask;
+ }
+ if (coherentFlags.volatil) {
+ mask = mask | spv::MemoryAccessVolatileMask;
+ }
+ if (mask != spv::MemoryAccessMaskNone) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ return mask;
+}
+
+spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
+{
+ if (!glslangIntermediate->usingVulkanMemoryModel()) {
+ return spv::ImageOperandsMaskNone;
+ }
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (coherentFlags.volatil ||
+ coherentFlags.coherent ||
+ coherentFlags.devicecoherent ||
+ coherentFlags.queuefamilycoherent ||
+ coherentFlags.workgroupcoherent ||
+ coherentFlags.subgroupcoherent) {
+ mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask |
+ spv::ImageOperandsMakeTexelVisibleKHRMask;
+ }
+ if (coherentFlags.nonprivate) {
+ mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask;
+ }
+ if (coherentFlags.volatil) {
+ mask = mask | spv::ImageOperandsVolatileTexelKHRMask;
+ }
+ if (mask != spv::ImageOperandsMaskNone) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ return mask;
+}
+
+spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type)
+{
+ spv::Builder::AccessChain::CoherentFlags flags;
+ flags.coherent = type.getQualifier().coherent;
+ flags.devicecoherent = type.getQualifier().devicecoherent;
+ flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent;
+ // shared variables are implicitly workgroupcoherent in GLSL.
+ flags.workgroupcoherent = type.getQualifier().workgroupcoherent ||
+ type.getQualifier().storage == glslang::EvqShared;
+ flags.subgroupcoherent = type.getQualifier().subgroupcoherent;
+ flags.volatil = type.getQualifier().volatil;
+ // *coherent variables are implicitly nonprivate in GLSL
+ flags.nonprivate = type.getQualifier().nonprivate ||
+ flags.subgroupcoherent ||
+ flags.workgroupcoherent ||
+ flags.queuefamilycoherent ||
+ flags.devicecoherent ||
+ flags.coherent ||
+ flags.volatil;
+ flags.isImage = type.getBasicType() == glslang::EbtSampler;
+ return flags;
+}
+
+spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
+{
+ spv::Scope scope;
+ if (coherentFlags.volatil || coherentFlags.coherent) {
+ // coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model
+ scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
+ } else if (coherentFlags.devicecoherent) {
+ scope = spv::ScopeDevice;
+ } else if (coherentFlags.queuefamilycoherent) {
+ scope = spv::ScopeQueueFamilyKHR;
+ } else if (coherentFlags.workgroupcoherent) {
+ scope = spv::ScopeWorkgroup;
+ } else if (coherentFlags.subgroupcoherent) {
+ scope = spv::ScopeSubgroup;
+ } else {
+ scope = spv::ScopeMax;
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+ return scope;
+}
+
+// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate
+// associated capabilities when required. For some built-in variables, a capability
+// is generated only when using the variable in an executable instruction, but not when
+// just declaring a struct member variable with it. This is true for PointSize,
+// ClipDistance, and CullDistance.
+spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltInVariable builtIn, bool memberDeclaration)
+{
+ switch (builtIn) {
+ case glslang::EbvPointSize:
+ // Defer adding the capability until the built-in is actually used.
+ if (! memberDeclaration) {
+ switch (glslangIntermediate->getStage()) {
+ case EShLangGeometry:
+ builder.addCapability(spv::CapabilityGeometryPointSize);
+ break;
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ builder.addCapability(spv::CapabilityTessellationPointSize);
+ break;
+ default:
+ break;
+ }
+ }
+ return spv::BuiltInPointSize;
+
+ // These *Distance capabilities logically belong here, but if the member is declared and
+ // then never used, consumers of SPIR-V prefer the capability not be declared.
+ // They are now generated when used, rather than here when declared.
+ // Potentially, the specification should be more clear what the minimum
+ // use needed is to trigger the capability.
+ //
+ case glslang::EbvClipDistance:
+ if (!memberDeclaration)
+ builder.addCapability(spv::CapabilityClipDistance);
+ return spv::BuiltInClipDistance;
+
+ case glslang::EbvCullDistance:
+ if (!memberDeclaration)
+ builder.addCapability(spv::CapabilityCullDistance);
+ return spv::BuiltInCullDistance;
+
+ case glslang::EbvViewportIndex:
+ builder.addCapability(spv::CapabilityMultiViewport);
+ if (glslangIntermediate->getStage() == EShLangVertex ||
+ glslangIntermediate->getStage() == EShLangTessControl ||
+ glslangIntermediate->getStage() == EShLangTessEvaluation) {
+
+ builder.addExtension(spv::E_SPV_EXT_shader_viewport_index_layer);
+ builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
+ }
+ return spv::BuiltInViewportIndex;
+
+ case glslang::EbvSampleId:
+ builder.addCapability(spv::CapabilitySampleRateShading);
+ return spv::BuiltInSampleId;
+
+ case glslang::EbvSamplePosition:
+ builder.addCapability(spv::CapabilitySampleRateShading);
+ return spv::BuiltInSamplePosition;
+
+ case glslang::EbvSampleMask:
+ return spv::BuiltInSampleMask;
+
+ case glslang::EbvLayer:
+#ifdef NV_EXTENSIONS
+ if (glslangIntermediate->getStage() == EShLangMeshNV) {
+ return spv::BuiltInLayer;
+ }
+#endif
+ builder.addCapability(spv::CapabilityGeometry);
+ if (glslangIntermediate->getStage() == EShLangVertex ||
+ glslangIntermediate->getStage() == EShLangTessControl ||
+ glslangIntermediate->getStage() == EShLangTessEvaluation) {
+
+ builder.addExtension(spv::E_SPV_EXT_shader_viewport_index_layer);
+ builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
+ }
+ return spv::BuiltInLayer;
+
+ case glslang::EbvPosition: return spv::BuiltInPosition;
+ case glslang::EbvVertexId: return spv::BuiltInVertexId;
+ case glslang::EbvInstanceId: return spv::BuiltInInstanceId;
+ case glslang::EbvVertexIndex: return spv::BuiltInVertexIndex;
+ case glslang::EbvInstanceIndex: return spv::BuiltInInstanceIndex;
+
+ case glslang::EbvBaseVertex:
+ addPre13Extension(spv::E_SPV_KHR_shader_draw_parameters);
+ builder.addCapability(spv::CapabilityDrawParameters);
+ return spv::BuiltInBaseVertex;
+
+ case glslang::EbvBaseInstance:
+ addPre13Extension(spv::E_SPV_KHR_shader_draw_parameters);
+ builder.addCapability(spv::CapabilityDrawParameters);
+ return spv::BuiltInBaseInstance;
+
+ case glslang::EbvDrawId:
+ addPre13Extension(spv::E_SPV_KHR_shader_draw_parameters);
+ builder.addCapability(spv::CapabilityDrawParameters);
+ return spv::BuiltInDrawIndex;
+
+ case glslang::EbvPrimitiveId:
+ if (glslangIntermediate->getStage() == EShLangFragment)
+ builder.addCapability(spv::CapabilityGeometry);
+ return spv::BuiltInPrimitiveId;
+
+ case glslang::EbvFragStencilRef:
+ builder.addExtension(spv::E_SPV_EXT_shader_stencil_export);
+ builder.addCapability(spv::CapabilityStencilExportEXT);
+ return spv::BuiltInFragStencilRefEXT;
+
+ case glslang::EbvInvocationId: return spv::BuiltInInvocationId;
+ case glslang::EbvTessLevelInner: return spv::BuiltInTessLevelInner;
+ case glslang::EbvTessLevelOuter: return spv::BuiltInTessLevelOuter;
+ case glslang::EbvTessCoord: return spv::BuiltInTessCoord;
+ case glslang::EbvPatchVertices: return spv::BuiltInPatchVertices;
+ case glslang::EbvFragCoord: return spv::BuiltInFragCoord;
+ case glslang::EbvPointCoord: return spv::BuiltInPointCoord;
+ case glslang::EbvFace: return spv::BuiltInFrontFacing;
+ case glslang::EbvFragDepth: return spv::BuiltInFragDepth;
+ case glslang::EbvHelperInvocation: return spv::BuiltInHelperInvocation;
+ case glslang::EbvNumWorkGroups: return spv::BuiltInNumWorkgroups;
+ case glslang::EbvWorkGroupSize: return spv::BuiltInWorkgroupSize;
+ case glslang::EbvWorkGroupId: return spv::BuiltInWorkgroupId;
+ case glslang::EbvLocalInvocationId: return spv::BuiltInLocalInvocationId;
+ case glslang::EbvLocalInvocationIndex: return spv::BuiltInLocalInvocationIndex;
+ case glslang::EbvGlobalInvocationId: return spv::BuiltInGlobalInvocationId;
+
+ case glslang::EbvSubGroupSize:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupSize;
+
+ case glslang::EbvSubGroupInvocation:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupLocalInvocationId;
+
+ case glslang::EbvSubGroupEqMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupEqMaskKHR;
+
+ case glslang::EbvSubGroupGeMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupGeMaskKHR;
+
+ case glslang::EbvSubGroupGtMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupGtMaskKHR;
+
+ case glslang::EbvSubGroupLeMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupLeMaskKHR;
+
+ case glslang::EbvSubGroupLtMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupLtMaskKHR;
+
+ case glslang::EbvNumSubgroups:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInNumSubgroups;
+
+ case glslang::EbvSubgroupID:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInSubgroupId;
+
+ case glslang::EbvSubgroupSize2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInSubgroupSize;
+
+ case glslang::EbvSubgroupInvocation2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInSubgroupLocalInvocationId;
+
+ case glslang::EbvSubgroupEqMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupEqMask;
+
+ case glslang::EbvSubgroupGeMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupGeMask;
+
+ case glslang::EbvSubgroupGtMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupGtMask;
+
+ case glslang::EbvSubgroupLeMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupLeMask;
+
+ case glslang::EbvSubgroupLtMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupLtMask;
+#ifdef AMD_EXTENSIONS
+ case glslang::EbvBaryCoordNoPersp:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordNoPerspAMD;
+
+ case glslang::EbvBaryCoordNoPerspCentroid:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordNoPerspCentroidAMD;
+
+ case glslang::EbvBaryCoordNoPerspSample:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordNoPerspSampleAMD;
+
+ case glslang::EbvBaryCoordSmooth:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordSmoothAMD;
+
+ case glslang::EbvBaryCoordSmoothCentroid:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordSmoothCentroidAMD;
+
+ case glslang::EbvBaryCoordSmoothSample:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordSmoothSampleAMD;
+
+ case glslang::EbvBaryCoordPullModel:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordPullModelAMD;
+#endif
+
+ case glslang::EbvDeviceIndex:
+ addPre13Extension(spv::E_SPV_KHR_device_group);
+ builder.addCapability(spv::CapabilityDeviceGroup);
+ return spv::BuiltInDeviceIndex;
+
+ case glslang::EbvViewIndex:
+ addPre13Extension(spv::E_SPV_KHR_multiview);
+ builder.addCapability(spv::CapabilityMultiView);
+ return spv::BuiltInViewIndex;
+
+ case glslang::EbvFragSizeEXT:
+ builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density);
+ builder.addCapability(spv::CapabilityFragmentDensityEXT);
+ return spv::BuiltInFragSizeEXT;
+
+ case glslang::EbvFragInvocationCountEXT:
+ builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density);
+ builder.addCapability(spv::CapabilityFragmentDensityEXT);
+ return spv::BuiltInFragInvocationCountEXT;
+
+#ifdef NV_EXTENSIONS
+ case glslang::EbvViewportMaskNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NV_viewport_array2);
+ builder.addCapability(spv::CapabilityShaderViewportMaskNV);
+ }
+ return spv::BuiltInViewportMaskNV;
+ case glslang::EbvSecondaryPositionNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ }
+ return spv::BuiltInSecondaryPositionNV;
+ case glslang::EbvSecondaryViewportMaskNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ }
+ return spv::BuiltInSecondaryViewportMaskNV;
+ case glslang::EbvPositionPerViewNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes);
+ builder.addCapability(spv::CapabilityPerViewAttributesNV);
+ }
+ return spv::BuiltInPositionPerViewNV;
+ case glslang::EbvViewportMaskPerViewNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes);
+ builder.addCapability(spv::CapabilityPerViewAttributesNV);
+ }
+ return spv::BuiltInViewportMaskPerViewNV;
+ case glslang::EbvFragFullyCoveredNV:
+ builder.addExtension(spv::E_SPV_EXT_fragment_fully_covered);
+ builder.addCapability(spv::CapabilityFragmentFullyCoveredEXT);
+ return spv::BuiltInFullyCoveredEXT;
+ case glslang::EbvFragmentSizeNV:
+ builder.addExtension(spv::E_SPV_NV_shading_rate);
+ builder.addCapability(spv::CapabilityShadingRateNV);
+ return spv::BuiltInFragmentSizeNV;
+ case glslang::EbvInvocationsPerPixelNV:
+ builder.addExtension(spv::E_SPV_NV_shading_rate);
+ builder.addCapability(spv::CapabilityShadingRateNV);
+ return spv::BuiltInInvocationsPerPixelNV;
+
+ // raytracing
+ case glslang::EbvLaunchIdNV:
+ return spv::BuiltInLaunchIdNV;
+ case glslang::EbvLaunchSizeNV:
+ return spv::BuiltInLaunchSizeNV;
+ case glslang::EbvWorldRayOriginNV:
+ return spv::BuiltInWorldRayOriginNV;
+ case glslang::EbvWorldRayDirectionNV:
+ return spv::BuiltInWorldRayDirectionNV;
+ case glslang::EbvObjectRayOriginNV:
+ return spv::BuiltInObjectRayOriginNV;
+ case glslang::EbvObjectRayDirectionNV:
+ return spv::BuiltInObjectRayDirectionNV;
+ case glslang::EbvRayTminNV:
+ return spv::BuiltInRayTminNV;
+ case glslang::EbvRayTmaxNV:
+ return spv::BuiltInRayTmaxNV;
+ case glslang::EbvInstanceCustomIndexNV:
+ return spv::BuiltInInstanceCustomIndexNV;
+ case glslang::EbvHitTNV:
+ return spv::BuiltInHitTNV;
+ case glslang::EbvHitKindNV:
+ return spv::BuiltInHitKindNV;
+ case glslang::EbvObjectToWorldNV:
+ return spv::BuiltInObjectToWorldNV;
+ case glslang::EbvWorldToObjectNV:
+ return spv::BuiltInWorldToObjectNV;
+ case glslang::EbvIncomingRayFlagsNV:
+ return spv::BuiltInIncomingRayFlagsNV;
+ case glslang::EbvBaryCoordNV:
+ builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric);
+ builder.addCapability(spv::CapabilityFragmentBarycentricNV);
+ return spv::BuiltInBaryCoordNV;
+ case glslang::EbvBaryCoordNoPerspNV:
+ builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric);
+ builder.addCapability(spv::CapabilityFragmentBarycentricNV);
+ return spv::BuiltInBaryCoordNoPerspNV;
+ case glslang::EbvTaskCountNV:
+ return spv::BuiltInTaskCountNV;
+ case glslang::EbvPrimitiveCountNV:
+ return spv::BuiltInPrimitiveCountNV;
+ case glslang::EbvPrimitiveIndicesNV:
+ return spv::BuiltInPrimitiveIndicesNV;
+ case glslang::EbvClipDistancePerViewNV:
+ return spv::BuiltInClipDistancePerViewNV;
+ case glslang::EbvCullDistancePerViewNV:
+ return spv::BuiltInCullDistancePerViewNV;
+ case glslang::EbvLayerPerViewNV:
+ return spv::BuiltInLayerPerViewNV;
+ case glslang::EbvMeshViewCountNV:
+ return spv::BuiltInMeshViewCountNV;
+ case glslang::EbvMeshViewIndicesNV:
+ return spv::BuiltInMeshViewIndicesNV;
+#endif
+ default:
+ return spv::BuiltInMax;
+ }
+}
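
As the comments at the top of the function note, some capabilities are deferred via the memberDeclaration flag. A sketch of the asymmetry, as called from within the traverser (illustrative, not a standalone program):

    // Declaring gl_ClipDistance as a block member adds no capability;
    // translating an actual use of the built-in does.
    spv::BuiltIn asMember = TranslateBuiltInDecoration(glslang::EbvClipDistance, /*memberDeclaration*/ true);  // no OpCapability
    spv::BuiltIn asUse    = TranslateBuiltInDecoration(glslang::EbvClipDistance, /*memberDeclaration*/ false); // adds CapabilityClipDistance
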
+
+// Translate glslang image layout format to SPIR-V image format.
+spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TType& type)
+{
+ assert(type.getBasicType() == glslang::EbtSampler);
+
+ // Check for capabilities
+ switch (type.getQualifier().layoutFormat) {
+ case glslang::ElfRg32f:
+ case glslang::ElfRg16f:
+ case glslang::ElfR11fG11fB10f:
+ case glslang::ElfR16f:
+ case glslang::ElfRgba16:
+ case glslang::ElfRgb10A2:
+ case glslang::ElfRg16:
+ case glslang::ElfRg8:
+ case glslang::ElfR16:
+ case glslang::ElfR8:
+ case glslang::ElfRgba16Snorm:
+ case glslang::ElfRg16Snorm:
+ case glslang::ElfRg8Snorm:
+ case glslang::ElfR16Snorm:
+ case glslang::ElfR8Snorm:
+
+ case glslang::ElfRg32i:
+ case glslang::ElfRg16i:
+ case glslang::ElfRg8i:
+ case glslang::ElfR16i:
+ case glslang::ElfR8i:
+
+ case glslang::ElfRgb10a2ui:
+ case glslang::ElfRg32ui:
+ case glslang::ElfRg16ui:
+ case glslang::ElfRg8ui:
+ case glslang::ElfR16ui:
+ case glslang::ElfR8ui:
+ builder.addCapability(spv::CapabilityStorageImageExtendedFormats);
+ break;
+
+ default:
+ break;
+ }
+
+ // do the translation
+ switch (type.getQualifier().layoutFormat) {
+ case glslang::ElfNone: return spv::ImageFormatUnknown;
+ case glslang::ElfRgba32f: return spv::ImageFormatRgba32f;
+ case glslang::ElfRgba16f: return spv::ImageFormatRgba16f;
+ case glslang::ElfR32f: return spv::ImageFormatR32f;
+ case glslang::ElfRgba8: return spv::ImageFormatRgba8;
+ case glslang::ElfRgba8Snorm: return spv::ImageFormatRgba8Snorm;
+ case glslang::ElfRg32f: return spv::ImageFormatRg32f;
+ case glslang::ElfRg16f: return spv::ImageFormatRg16f;
+ case glslang::ElfR11fG11fB10f: return spv::ImageFormatR11fG11fB10f;
+ case glslang::ElfR16f: return spv::ImageFormatR16f;
+ case glslang::ElfRgba16: return spv::ImageFormatRgba16;
+ case glslang::ElfRgb10A2: return spv::ImageFormatRgb10A2;
+ case glslang::ElfRg16: return spv::ImageFormatRg16;
+ case glslang::ElfRg8: return spv::ImageFormatRg8;
+ case glslang::ElfR16: return spv::ImageFormatR16;
+ case glslang::ElfR8: return spv::ImageFormatR8;
+ case glslang::ElfRgba16Snorm: return spv::ImageFormatRgba16Snorm;
+ case glslang::ElfRg16Snorm: return spv::ImageFormatRg16Snorm;
+ case glslang::ElfRg8Snorm: return spv::ImageFormatRg8Snorm;
+ case glslang::ElfR16Snorm: return spv::ImageFormatR16Snorm;
+ case glslang::ElfR8Snorm: return spv::ImageFormatR8Snorm;
+ case glslang::ElfRgba32i: return spv::ImageFormatRgba32i;
+ case glslang::ElfRgba16i: return spv::ImageFormatRgba16i;
+ case glslang::ElfRgba8i: return spv::ImageFormatRgba8i;
+ case glslang::ElfR32i: return spv::ImageFormatR32i;
+ case glslang::ElfRg32i: return spv::ImageFormatRg32i;
+ case glslang::ElfRg16i: return spv::ImageFormatRg16i;
+ case glslang::ElfRg8i: return spv::ImageFormatRg8i;
+ case glslang::ElfR16i: return spv::ImageFormatR16i;
+ case glslang::ElfR8i: return spv::ImageFormatR8i;
+ case glslang::ElfRgba32ui: return spv::ImageFormatRgba32ui;
+ case glslang::ElfRgba16ui: return spv::ImageFormatRgba16ui;
+ case glslang::ElfRgba8ui: return spv::ImageFormatRgba8ui;
+ case glslang::ElfR32ui: return spv::ImageFormatR32ui;
+ case glslang::ElfRg32ui: return spv::ImageFormatRg32ui;
+ case glslang::ElfRg16ui: return spv::ImageFormatRg16ui;
+ case glslang::ElfRgb10a2ui: return spv::ImageFormatRgb10a2ui;
+ case glslang::ElfRg8ui: return spv::ImageFormatRg8ui;
+ case glslang::ElfR16ui: return spv::ImageFormatR16ui;
+ case glslang::ElfR8ui: return spv::ImageFormatR8ui;
+ default: return spv::ImageFormatMax;
+ }
+}
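+
+// For example, a GLSL image declared as
+//     layout(rg16f) uniform image2D img;
+// falls in the capability switch of TranslateImageFormat above and requires
+// CapabilityStorageImageExtendedFormats, while the base formats (e.g.
+// rgba32f, rgba8, r32i) are covered by the plain Shader capability.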
+
+spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSelectionControl(const glslang::TIntermSelection& selectionNode) const
+{
+ if (selectionNode.getFlatten())
+ return spv::SelectionControlFlattenMask;
+ if (selectionNode.getDontFlatten())
+ return spv::SelectionControlDontFlattenMask;
+ return spv::SelectionControlMaskNone;
+}
+
+spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSwitchControl(const glslang::TIntermSwitch& switchNode) const
+{
+ if (switchNode.getFlatten())
+ return spv::SelectionControlFlattenMask;
+ if (switchNode.getDontFlatten())
+ return spv::SelectionControlDontFlattenMask;
+ return spv::SelectionControlMaskNone;
+}
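+
+// The two translations above typically reflect GL_EXT_control_flow_attributes
+// hints from the front end, e.g. (assuming that extension is enabled):
+//     [[flatten]]      if (cond) { ... }   -> SelectionControlFlattenMask
+//     [[dont_flatten]] switch (x) { ... }  -> SelectionControlDontFlattenMask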
+
+// Translate glslang loop hints to a SPIR-V loop control mask.
+// Sets 'dependencyLength' to a non-zero value when the returned mask includes
+// DependencyLength; the caller must then emit it as an extra operand.
+spv::LoopControlMask TGlslangToSpvTraverser::TranslateLoopControl(const glslang::TIntermLoop& loopNode,
+ unsigned int& dependencyLength) const
+{
+ spv::LoopControlMask control = spv::LoopControlMaskNone;
+
+ if (loopNode.getDontUnroll())
+ control = control | spv::LoopControlDontUnrollMask;
+ if (loopNode.getUnroll())
+ control = control | spv::LoopControlUnrollMask;
+ if (unsigned(loopNode.getLoopDependency()) == glslang::TIntermLoop::dependencyInfinite)
+ control = control | spv::LoopControlDependencyInfiniteMask;
+ else if (loopNode.getLoopDependency() > 0) {
+ control = control | spv::LoopControlDependencyLengthMask;
+ dependencyLength = loopNode.getLoopDependency();
+ }
+
+ return control;
+}
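+
+// For example, with GL_EXT_control_flow_attributes, [[unroll]] and
+// [[dont_unroll]] map to the Unroll/DontUnroll masks, and a loop hinted as
+//     [[dependency_length(4)]] for (...) { ... }
+// would yield LoopControlDependencyLengthMask with 'dependencyLength' set to
+// 4, which the caller emits as the extra literal operand of OpLoopMerge.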
+
+// Translate glslang type to SPIR-V storage class.
+spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::TType& type)
+{
+ if (type.getQualifier().isPipeInput())
+ return spv::StorageClassInput;
+ if (type.getQualifier().isPipeOutput())
+ return spv::StorageClassOutput;
+
+ if (glslangIntermediate->getSource() != glslang::EShSourceHlsl ||
+ type.getQualifier().storage == glslang::EvqUniform) {
+ if (type.getBasicType() == glslang::EbtAtomicUint)
+ return spv::StorageClassAtomicCounter;
+ if (type.containsOpaque())
+ return spv::StorageClassUniformConstant;
+ }
+
+#ifdef NV_EXTENSIONS
+ if (type.getQualifier().isUniformOrBuffer() &&
+ type.getQualifier().layoutShaderRecordNV) {
+ return spv::StorageClassShaderRecordBufferNV;
+ }
+#endif
+
+ if (glslangIntermediate->usingStorageBuffer() && type.getQualifier().storage == glslang::EvqBuffer) {
+ addPre13Extension(spv::E_SPV_KHR_storage_buffer_storage_class);
+ return spv::StorageClassStorageBuffer;
+ }
+
+ if (type.getQualifier().isUniformOrBuffer()) {
+ if (type.getQualifier().layoutPushConstant)
+ return spv::StorageClassPushConstant;
+ if (type.getBasicType() == glslang::EbtBlock)
+ return spv::StorageClassUniform;
+ return spv::StorageClassUniformConstant;
+ }
+
+ switch (type.getQualifier().storage) {
+ case glslang::EvqShared: return spv::StorageClassWorkgroup;
+ case glslang::EvqGlobal: return spv::StorageClassPrivate;
+ case glslang::EvqConstReadOnly: return spv::StorageClassFunction;
+ case glslang::EvqTemporary: return spv::StorageClassFunction;
+#ifdef NV_EXTENSIONS
+ case glslang::EvqPayloadNV: return spv::StorageClassRayPayloadNV;
+ case glslang::EvqPayloadInNV: return spv::StorageClassIncomingRayPayloadNV;
+ case glslang::EvqHitAttrNV: return spv::StorageClassHitAttributeNV;
+ case glslang::EvqCallableDataNV: return spv::StorageClassCallableDataNV;
+ case glslang::EvqCallableDataInNV: return spv::StorageClassIncomingCallableDataNV;
+#endif
+ default:
+ assert(0);
+ break;
+ }
+
+ return spv::StorageClassFunction;
+}
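+
+// Roughly, for GLSL inputs to TranslateStorageClass:
+//     in/out variables               -> Input / Output
+//     uniform sampler2D s;           -> UniformConstant
+//     uniform UBO { ... };           -> Uniform
+//     buffer SSBO { ... };           -> StorageBuffer when targeting SPIR-V
+//                                       1.3+ (pre-1.3 also adds
+//                                       SPV_KHR_storage_buffer_storage_class),
+//                                       otherwise Uniform
+//     shared / globals / temporaries -> Workgroup / Private / Function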
+
+// Add capabilities pertaining to how an array is indexed.
+void TGlslangToSpvTraverser::addIndirectionIndexCapabilities(const glslang::TType& baseType,
+ const glslang::TType& indexType)
+{
+ if (indexType.getQualifier().isNonUniform()) {
+ // deal with an asserted non-uniform index
+ // SPV_EXT_descriptor_indexing already added in TranslateNonUniformDecoration
+ if (baseType.getBasicType() == glslang::EbtSampler) {
+ if (baseType.getQualifier().hasAttachment())
+ builder.addCapability(spv::CapabilityInputAttachmentArrayNonUniformIndexingEXT);
+ else if (baseType.isImage() && baseType.getSampler().dim == glslang::EsdBuffer)
+ builder.addCapability(spv::CapabilityStorageTexelBufferArrayNonUniformIndexingEXT);
+ else if (baseType.isTexture() && baseType.getSampler().dim == glslang::EsdBuffer)
+ builder.addCapability(spv::CapabilityUniformTexelBufferArrayNonUniformIndexingEXT);
+ else if (baseType.isImage())
+ builder.addCapability(spv::CapabilityStorageImageArrayNonUniformIndexingEXT);
+ else if (baseType.isTexture())
+ builder.addCapability(spv::CapabilitySampledImageArrayNonUniformIndexingEXT);
+ } else if (baseType.getBasicType() == glslang::EbtBlock) {
+ if (baseType.getQualifier().storage == glslang::EvqBuffer)
+ builder.addCapability(spv::CapabilityStorageBufferArrayNonUniformIndexingEXT);
+ else if (baseType.getQualifier().storage == glslang::EvqUniform)
+ builder.addCapability(spv::CapabilityUniformBufferArrayNonUniformIndexingEXT);
+ }
+ } else {
+ // assume a dynamically uniform index
+ if (baseType.getBasicType() == glslang::EbtSampler) {
+ if (baseType.getQualifier().hasAttachment()) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityInputAttachmentArrayDynamicIndexingEXT);
+ } else if (baseType.isImage() && baseType.getSampler().dim == glslang::EsdBuffer) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityStorageTexelBufferArrayDynamicIndexingEXT);
+ } else if (baseType.isTexture() && baseType.getSampler().dim == glslang::EsdBuffer) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityUniformTexelBufferArrayDynamicIndexingEXT);
+ }
+ }
+ }
+}
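+
+// For example, with GL_EXT_nonuniform_qualifier a fragment shader doing
+//     color = texture(textures[nonuniformEXT(i)], uv);
+// asserts a non-uniform index and lands on
+// CapabilitySampledImageArrayNonUniformIndexingEXT above.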
+
+// Return whether or not the given type is something that should be tied to a
+// descriptor set.
+bool IsDescriptorResource(const glslang::TType& type)
+{
+ // uniform and buffer blocks are included, unless it is a push_constant
+ if (type.getBasicType() == glslang::EbtBlock)
+ return type.getQualifier().isUniformOrBuffer() &&
+#ifdef NV_EXTENSIONS
+ ! type.getQualifier().layoutShaderRecordNV &&
+#endif
+ ! type.getQualifier().layoutPushConstant;
+
+ // non block...
+ // basically samplerXXX/subpass/sampler/texture are all included
+ // if they are the global-scope-class, not the function parameter
+ // (or local, if they ever exist) class.
+ if (type.getBasicType() == glslang::EbtSampler)
+ return type.getQualifier().isUniformOrBuffer();
+
+ // None of the above.
+ return false;
+}
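+
+// Examples:
+//     uniform sampler2D s;                      -> true  (needs set/binding)
+//     buffer SSBO { float f; };                 -> true
+//     layout(push_constant) uniform P { ... };  -> false (no descriptor set)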
+
+void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent)
+{
+ if (child.layoutMatrix == glslang::ElmNone)
+ child.layoutMatrix = parent.layoutMatrix;
+
+ if (parent.invariant)
+ child.invariant = true;
+ if (parent.nopersp)
+ child.nopersp = true;
+#ifdef AMD_EXTENSIONS
+ if (parent.explicitInterp)
+ child.explicitInterp = true;
+#endif
+ if (parent.flat)
+ child.flat = true;
+ if (parent.centroid)
+ child.centroid = true;
+ if (parent.patch)
+ child.patch = true;
+ if (parent.sample)
+ child.sample = true;
+ if (parent.coherent)
+ child.coherent = true;
+ if (parent.devicecoherent)
+ child.devicecoherent = true;
+ if (parent.queuefamilycoherent)
+ child.queuefamilycoherent = true;
+ if (parent.workgroupcoherent)
+ child.workgroupcoherent = true;
+ if (parent.subgroupcoherent)
+ child.subgroupcoherent = true;
+ if (parent.nonprivate)
+ child.nonprivate = true;
+ if (parent.volatil)
+ child.volatil = true;
+ if (parent.restrict)
+ child.restrict = true;
+ if (parent.readonly)
+ child.readonly = true;
+ if (parent.writeonly)
+ child.writeonly = true;
+#ifdef NV_EXTENSIONS
+ if (parent.perPrimitiveNV)
+ child.perPrimitiveNV = true;
+ if (parent.perViewNV)
+ child.perViewNV = true;
+ if (parent.perTaskNV)
+ child.perTaskNV = true;
+#endif
+}
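+
+// For example, given "flat in Block { vec4 v; float f; } b;", the 'flat' on
+// the block propagates to both members here, so each can receive the Flat
+// decoration when members are decorated later.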
+
+bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier)
+{
+    // This should list qualifiers that simultaneously satisfy:
+    // - struct members might inherit them from a struct declaration
+    //     (note that non-block structs don't explicitly inherit,
+    //      only implicitly, meaning no decoration involved)
+    // - they affect decorations on the struct members
+    //     (note smooth does not, and something like volatile is expected
+    //      to affect the whole object)
+    // - they are not part of the offset/std430/etc. or row/column-major layout
+ return qualifier.invariant || (qualifier.hasLocation() && type.getBasicType() == glslang::EbtBlock);
+}
+
+//
+// Implement the TGlslangToSpvTraverser class.
+//
+
+TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, const glslang::TIntermediate* glslangIntermediate,
+ spv::SpvBuildLogger* buildLogger, glslang::SpvOptions& options)
+ : TIntermTraverser(true, false, true),
+ options(options),
+ shaderEntry(nullptr), currentFunction(nullptr),
+ sequenceDepth(0), logger(buildLogger),
+ builder(spvVersion, (glslang::GetKhronosToolId() << 16) | glslang::GetSpirvGeneratorVersion(), logger),
+ inEntryPoint(false), entryPointTerminated(false), linkageOnly(false),
+ glslangIntermediate(glslangIntermediate)
+{
+ spv::ExecutionModel executionModel = TranslateExecutionModel(glslangIntermediate->getStage());
+
+ builder.clearAccessChain();
+ builder.setSource(TranslateSourceLanguage(glslangIntermediate->getSource(), glslangIntermediate->getProfile()),
+ glslangIntermediate->getVersion());
+
+ if (options.generateDebugInfo) {
+ builder.setEmitOpLines();
+ builder.setSourceFile(glslangIntermediate->getSourceFile());
+
+        // Set the source shader's text. When targeting SPIR-V 1.0, include
+        // a preamble in comments stating the OpModuleProcessed instructions;
+        // otherwise, emit those as actual instructions.
+ std::string text;
+ const std::vector<std::string>& processes = glslangIntermediate->getProcesses();
+ for (int p = 0; p < (int)processes.size(); ++p) {
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1) {
+ text.append("// OpModuleProcessed ");
+ text.append(processes[p]);
+ text.append("\n");
+ } else
+ builder.addModuleProcessed(processes[p]);
+ }
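+        // For SPIR-V 1.0, the OpModuleProcessed lines above went in as "//"
+        // comments; reset the reported numbering so the embedded source
+        // still starts at line 1.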
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1 && (int)processes.size() > 0)
+ text.append("#line 1\n");
+ text.append(glslangIntermediate->getSourceText());
+ builder.setSourceText(text);
+ // Pass name and text for all included files
+ const std::map<std::string, std::string>& include_txt = glslangIntermediate->getIncludeText();
+ for (auto iItr = include_txt.begin(); iItr != include_txt.end(); ++iItr)
+ builder.addInclude(iItr->first, iItr->second);
+ }
+ stdBuiltins = builder.import("GLSL.std.450");
+
+ spv::AddressingModel addressingModel = spv::AddressingModelLogical;
+ spv::MemoryModel memoryModel = spv::MemoryModelGLSL450;
+
+ if (glslangIntermediate->usingPhysicalStorageBuffer()) {
+ addressingModel = spv::AddressingModelPhysicalStorageBuffer64EXT;
+ builder.addExtension(spv::E_SPV_EXT_physical_storage_buffer);
+ builder.addCapability(spv::CapabilityPhysicalStorageBufferAddressesEXT);
+    }
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
+ memoryModel = spv::MemoryModelVulkanKHR;
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ builder.addExtension(spv::E_SPV_KHR_vulkan_memory_model);
+ }
+ builder.setMemoryModel(addressingModel, memoryModel);
+
+ if (glslangIntermediate->usingVariablePointers()) {
+ builder.addCapability(spv::CapabilityVariablePointers);
+ }
+
+ shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
+ entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str());
+
+ // Add the source extensions
+ const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
+ for (auto it = sourceExtensions.begin(); it != sourceExtensions.end(); ++it)
+ builder.addSourceExtension(it->c_str());
+
+ // Add the top-level modes for this shader.
+
+ if (glslangIntermediate->getXfbMode()) {
+ builder.addCapability(spv::CapabilityTransformFeedback);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeXfb);
+ }
+
+ unsigned int mode;
+ switch (glslangIntermediate->getStage()) {
+ case EShLangVertex:
+ builder.addCapability(spv::CapabilityShader);
+ break;
+
+ case EShLangTessEvaluation:
+ case EShLangTessControl:
+ builder.addCapability(spv::CapabilityTessellation);
+
+ glslang::TLayoutGeometry primitive;
+
+ if (glslangIntermediate->getStage() == EShLangTessControl) {
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+ primitive = glslangIntermediate->getOutputPrimitive();
+ } else {
+ primitive = glslangIntermediate->getInputPrimitive();
+ }
+
+ switch (primitive) {
+ case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break;
+ case glslang::ElgQuads: mode = spv::ExecutionModeQuads; break;
+ case glslang::ElgIsolines: mode = spv::ExecutionModeIsolines; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ switch (glslangIntermediate->getVertexSpacing()) {
+ case glslang::EvsEqual: mode = spv::ExecutionModeSpacingEqual; break;
+ case glslang::EvsFractionalEven: mode = spv::ExecutionModeSpacingFractionalEven; break;
+ case glslang::EvsFractionalOdd: mode = spv::ExecutionModeSpacingFractionalOdd; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ switch (glslangIntermediate->getVertexOrder()) {
+ case glslang::EvoCw: mode = spv::ExecutionModeVertexOrderCw; break;
+ case glslang::EvoCcw: mode = spv::ExecutionModeVertexOrderCcw; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ if (glslangIntermediate->getPointMode())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModePointMode);
+ break;
+
+ case EShLangGeometry:
+ builder.addCapability(spv::CapabilityGeometry);
+ switch (glslangIntermediate->getInputPrimitive()) {
+ case glslang::ElgPoints: mode = spv::ExecutionModeInputPoints; break;
+ case glslang::ElgLines: mode = spv::ExecutionModeInputLines; break;
+ case glslang::ElgLinesAdjacency: mode = spv::ExecutionModeInputLinesAdjacency; break;
+ case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break;
+ case glslang::ElgTrianglesAdjacency: mode = spv::ExecutionModeInputTrianglesAdjacency; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeInvocations, glslangIntermediate->getInvocations());
+
+ switch (glslangIntermediate->getOutputPrimitive()) {
+ case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break;
+ case glslang::ElgLineStrip: mode = spv::ExecutionModeOutputLineStrip; break;
+ case glslang::ElgTriangleStrip: mode = spv::ExecutionModeOutputTriangleStrip; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+ break;
+
+ case EShLangFragment:
+ builder.addCapability(spv::CapabilityShader);
+ if (glslangIntermediate->getPixelCenterInteger())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModePixelCenterInteger);
+
+ if (glslangIntermediate->getOriginUpperLeft())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginUpperLeft);
+ else
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginLowerLeft);
+
+ if (glslangIntermediate->getEarlyFragmentTests())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeEarlyFragmentTests);
+
+ if (glslangIntermediate->getPostDepthCoverage()) {
+ builder.addCapability(spv::CapabilitySampleMaskPostDepthCoverage);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModePostDepthCoverage);
+ builder.addExtension(spv::E_SPV_KHR_post_depth_coverage);
+ }
+
+ switch(glslangIntermediate->getDepth()) {
+ case glslang::EldGreater: mode = spv::ExecutionModeDepthGreater; break;
+ case glslang::EldLess: mode = spv::ExecutionModeDepthLess; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ if (glslangIntermediate->getDepth() != glslang::EldUnchanged && glslangIntermediate->isDepthReplacing())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeDepthReplacing);
+ break;
+
+ case EShLangCompute:
+ builder.addCapability(spv::CapabilityShader);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0),
+ glslangIntermediate->getLocalSize(1),
+ glslangIntermediate->getLocalSize(2));
+#ifdef NV_EXTENSIONS
+ if (glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupQuads) {
+ builder.addCapability(spv::CapabilityComputeDerivativeGroupQuadsNV);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupQuadsNV);
+ builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives);
+ } else if (glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupLinear) {
+ builder.addCapability(spv::CapabilityComputeDerivativeGroupLinearNV);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupLinearNV);
+ builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives);
+ }
+#endif
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ builder.addCapability(spv::CapabilityRayTracingNV);
+ builder.addExtension("SPV_NV_ray_tracing");
+ break;
+ case EShLangTaskNV:
+ case EShLangMeshNV:
+ builder.addCapability(spv::CapabilityMeshShadingNV);
+ builder.addExtension(spv::E_SPV_NV_mesh_shader);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0),
+ glslangIntermediate->getLocalSize(1),
+ glslangIntermediate->getLocalSize(2));
+ if (glslangIntermediate->getStage() == EShLangMeshNV) {
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputPrimitivesNV, glslangIntermediate->getPrimitives());
+
+ switch (glslangIntermediate->getOutputPrimitive()) {
+ case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break;
+ case glslang::ElgLines: mode = spv::ExecutionModeOutputLinesNV; break;
+ case glslang::ElgTriangles: mode = spv::ExecutionModeOutputTrianglesNV; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+ }
+ break;
+#endif
+
+ default:
+ break;
+ }
+}
+
+// Finish creating SPV, after the traversal is complete.
+void TGlslangToSpvTraverser::finishSpv()
+{
+ // Finish the entry point function
+ if (! entryPointTerminated) {
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ builder.leaveFunction();
+ }
+
+    // finish off the entry-point SPV instruction by adding the Input/Output <id>s
+ for (auto it = iOSet.cbegin(); it != iOSet.cend(); ++it)
+ entryPoint->addIdOperand(*it);
+
+ // Add capabilities, extensions, remove unneeded decorations, etc.,
+ // based on the resulting SPIR-V.
+ builder.postProcess();
+}
+
+// Write the SPV into 'out'.
+void TGlslangToSpvTraverser::dumpSpv(std::vector<unsigned int>& out)
+{
+ builder.dump(out);
+}
+
+//
+// Implement the traversal functions.
+//
+// Return true from interior nodes to have the external traversal
+// continue on to children. Return false if children were
+// already processed.
+//
+
+//
+// Symbols can turn into
+// - uniform/input reads
+// - output writes
+// - complex lvalue base setups: foo.bar[3].... , where we see foo and start up an access chain
+// - something simple that degenerates into the last bullet
+//
+void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol)
+{
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (symbol->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ // getSymbolId() will set up all the IO decorations on the first call.
+ // Formal function parameters were mapped during makeFunctions().
+ spv::Id id = getSymbolId(symbol);
+
+ // Include all "static use" and "linkage only" interface variables on the OpEntryPoint instruction
+ if (builder.isPointer(id)) {
+ spv::StorageClass sc = builder.getStorageClass(id);
+ if (sc == spv::StorageClassInput || sc == spv::StorageClassOutput) {
+ if (!symbol->getType().isStruct() || symbol->getType().getStruct()->size() > 0)
+ iOSet.insert(id);
+ }
+ }
+
+ // Only process non-linkage-only nodes for generating actual static uses
+ if (! linkageOnly || symbol->getQualifier().isSpecConstant()) {
+ // Prepare to generate code for the access
+
+ // L-value chains will be computed left to right. We're on the symbol now,
+ // which is the left-most part of the access chain, so now is "clear" time,
+ // followed by setting the base.
+ builder.clearAccessChain();
+
+ // For now, we consider all user variables as being in memory, so they are pointers,
+ // except for
+ // A) R-Value arguments to a function, which are an intermediate object.
+ // See comments in handleUserFunctionCall().
+ // B) Specialization constants (normal constants don't even come in as a variable),
+ // These are also pure R-values.
+ glslang::TQualifier qualifier = symbol->getQualifier();
+ if (qualifier.isSpecConstant() || rValueParameters.find(symbol->getId()) != rValueParameters.end())
+ builder.setAccessChainRValue(id);
+ else
+ builder.setAccessChainLValue(id);
+ }
+
+ // Process linkage-only nodes for any special additional interface work.
+ if (linkageOnly) {
+ if (glslangIntermediate->getHlslFunctionality1()) {
+ // Map implicit counter buffers to their originating buffers, which should have been
+ // seen by now, given earlier pruning of unused counters, and preservation of order
+ // of declaration.
+ if (symbol->getType().getQualifier().isUniformOrBuffer()) {
+ if (!glslangIntermediate->hasCounterBufferName(symbol->getName())) {
+                    // Save this buffer as a possible originator of a counter buffer,
+                    // keyed by the counter-buffer name it would imply.
+ std::string keyName = symbol->getName().c_str();
+ keyName = glslangIntermediate->addCounterBufferName(keyName);
+ counterOriginator[keyName] = symbol;
+ } else {
+ // Handle a counter buffer, by finding the saved originating buffer.
+ std::string keyName = symbol->getName().c_str();
+ auto it = counterOriginator.find(keyName);
+ if (it != counterOriginator.end()) {
+ id = getSymbolId(it->second);
+ if (id != spv::NoResult) {
+ spv::Id counterId = getSymbolId(symbol);
+ if (counterId != spv::NoResult) {
+ builder.addExtension("SPV_GOOGLE_hlsl_functionality1");
+ builder.addDecorationId(id, spv::DecorationHlslCounterBufferGOOGLE, counterId);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::TIntermBinary* node)
+{
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ // First, handle special cases
+ switch (node->getOp()) {
+ case glslang::EOpAssign:
+ case glslang::EOpAddAssign:
+ case glslang::EOpSubAssign:
+ case glslang::EOpMulAssign:
+ case glslang::EOpVectorTimesMatrixAssign:
+ case glslang::EOpVectorTimesScalarAssign:
+ case glslang::EOpMatrixTimesScalarAssign:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ case glslang::EOpDivAssign:
+ case glslang::EOpModAssign:
+ case glslang::EOpAndAssign:
+ case glslang::EOpInclusiveOrAssign:
+ case glslang::EOpExclusiveOrAssign:
+ case glslang::EOpLeftShiftAssign:
+ case glslang::EOpRightShiftAssign:
+ // A bin-op assign "a += b" means the same thing as "a = a + b"
+ // where a is evaluated before b. For a simple assignment, GLSL
+ // says to evaluate the left before the right. So, always, left
+ // node then right node.
+ {
+ // get the left l-value, save it away
+ builder.clearAccessChain();
+ node->getLeft()->traverse(this);
+ spv::Builder::AccessChain lValue = builder.getAccessChain();
+
+ // evaluate the right
+ builder.clearAccessChain();
+ node->getRight()->traverse(this);
+ spv::Id rValue = accessChainLoad(node->getRight()->getType());
+
+ if (node->getOp() != glslang::EOpAssign) {
+ // the left is also an r-value
+ builder.setAccessChain(lValue);
+ spv::Id leftRValue = accessChainLoad(node->getLeft()->getType());
+
+ // do the operation
+ OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ rValue = createBinaryOperation(node->getOp(), decorations,
+ convertGlslangToSpvType(node->getType()), leftRValue, rValue,
+ node->getType().getBasicType());
+
+ // these all need their counterparts in createBinaryOperation()
+ assert(rValue != spv::NoResult);
+ }
+
+ // store the result
+ builder.setAccessChain(lValue);
+ multiTypeStore(node->getLeft()->getType(), rValue);
+
+ // assignments are expressions having an rValue after they are evaluated...
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(rValue);
+ }
+ return false;
+ case glslang::EOpIndexDirect:
+ case glslang::EOpIndexDirectStruct:
+ {
+ // Get the left part of the access chain.
+ node->getLeft()->traverse(this);
+
+ // Add the next element in the chain
+
+ const int glslangIndex = node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (! node->getLeft()->getType().isArray() &&
+ node->getLeft()->getType().isVector() &&
+ node->getOp() == glslang::EOpIndexDirect) {
+ // This is essentially a hard-coded vector swizzle of size 1,
+ // so short circuit the access-chain stuff with a swizzle.
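+                // (e.g. "v[2]" on a vector is handled like the swizzle "v.z")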
+ std::vector<unsigned> swizzle;
+ swizzle.push_back(glslangIndex);
+ int dummySize;
+ builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()),
+ TranslateCoherent(node->getLeft()->getType()),
+ glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize));
+ } else {
+
+ // Load through a block reference is performed with a dot operator that
+ // is mapped to EOpIndexDirectStruct. When we get to the actual reference,
+ // do a load and reset the access chain.
+ if (node->getLeft()->getBasicType() == glslang::EbtReference &&
+ !node->getLeft()->getType().isArray() &&
+ node->getOp() == glslang::EOpIndexDirectStruct)
+ {
+ spv::Id left = accessChainLoad(node->getLeft()->getType());
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(left);
+ }
+
+ int spvIndex = glslangIndex;
+ if (node->getLeft()->getBasicType() == glslang::EbtBlock &&
+ node->getOp() == glslang::EOpIndexDirectStruct)
+ {
+                // This may be, e.g., an anonymous block-member selection, which generally needs
+                // index remapping due to hidden members in anonymous blocks.
+ std::vector<int>& remapper = memberRemapper[node->getLeft()->getType().getStruct()];
+ assert(remapper.size() > 0);
+ spvIndex = remapper[glslangIndex];
+ }
+
+ // normal case for indexing array or structure or block
+ builder.accessChainPush(builder.makeIntConstant(spvIndex), TranslateCoherent(node->getLeft()->getType()), node->getLeft()->getType().getBufferReferenceAlignment());
+
+ // Add capabilities here for accessing PointSize and clip/cull distance.
+ // We have deferred generation of associated capabilities until now.
+ if (node->getLeft()->getType().isStruct() && ! node->getLeft()->getType().isArray())
+ declareUseOfStructMember(*(node->getLeft()->getType().getStruct()), glslangIndex);
+ }
+ }
+ return false;
+ case glslang::EOpIndexIndirect:
+ {
+ // Structure or array or vector indirection.
+ // Will use native SPIR-V access-chain for struct and array indirection;
+ // matrices are arrays of vectors, so will also work for a matrix.
+ // Will use the access chain's 'component' for variable index into a vector.
+
+ // This adapter is building access chains left to right.
+ // Set up the access chain to the left.
+ node->getLeft()->traverse(this);
+
+ // save it so that computing the right side doesn't trash it
+ spv::Builder::AccessChain partial = builder.getAccessChain();
+
+ // compute the next index in the chain
+ builder.clearAccessChain();
+ node->getRight()->traverse(this);
+ spv::Id index = accessChainLoad(node->getRight()->getType());
+
+ addIndirectionIndexCapabilities(node->getLeft()->getType(), node->getRight()->getType());
+
+ // restore the saved access chain
+ builder.setAccessChain(partial);
+
+ if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector()) {
+ int dummySize;
+ builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()),
+ TranslateCoherent(node->getLeft()->getType()),
+ glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize));
+ } else
+ builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()), node->getLeft()->getType().getBufferReferenceAlignment());
+ }
+ return false;
+ case glslang::EOpVectorSwizzle:
+ {
+ node->getLeft()->traverse(this);
+ std::vector<unsigned> swizzle;
+ convertSwizzle(*node->getRight()->getAsAggregate(), swizzle);
+ int dummySize;
+ builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()),
+ TranslateCoherent(node->getLeft()->getType()),
+ glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize));
+ }
+ return false;
+ case glslang::EOpMatrixSwizzle:
+ logger->missingFunctionality("matrix swizzle");
+ return true;
+ case glslang::EOpLogicalOr:
+ case glslang::EOpLogicalAnd:
+        {
+ // These may require short circuiting, but can sometimes be done as straight
+ // binary operations. The right operand must be short circuited if it has
+ // side effects, and should probably be if it is complex.
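+            // For example, "a && b" on scalar booleans can be a plain
+            // OpLogicalAnd, while "a && f(b)" must become a conditional
+            // branch so f() is only evaluated when 'a' is true.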
+ if (isTrivial(node->getRight()->getAsTyped()))
+ break; // handle below as a normal binary operation
+ // otherwise, we need to do dynamic short circuiting on the right operand
+ spv::Id result = createShortCircuit(node->getOp(), *node->getLeft()->getAsTyped(), *node->getRight()->getAsTyped());
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ }
+ return false;
+ default:
+ break;
+ }
+
+ // Assume generic binary op...
+
+    // get left operand
+ builder.clearAccessChain();
+ node->getLeft()->traverse(this);
+ spv::Id left = accessChainLoad(node->getLeft()->getType());
+
+    // get right operand
+ builder.clearAccessChain();
+ node->getRight()->traverse(this);
+ spv::Id right = accessChainLoad(node->getRight()->getType());
+
+ // get result
+ OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ spv::Id result = createBinaryOperation(node->getOp(), decorations,
+ convertGlslangToSpvType(node->getType()), left, right,
+ node->getLeft()->getType().getBasicType());
+
+ builder.clearAccessChain();
+ if (! result) {
+ logger->missingFunctionality("unknown glslang binary operation");
+ return true; // pick up a child as the place-holder result
+ } else {
+ builder.setAccessChainRValue(result);
+ return false;
+ }
+}
+
+bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node)
+{
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ spv::Id result = spv::NoResult;
+
+ // try texturing first
+ result = createImageTextureFunctionCall(node);
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false; // done with this node
+ }
+
+ // Non-texturing.
+
+ if (node->getOp() == glslang::EOpArrayLength) {
+ // Quite special; won't want to evaluate the operand.
+
+ // Currently, the front-end does not allow .length() on an array until it is sized,
+        // except for the last block member of an SSBO.
+ // TODO: If this changes, link-time sized arrays might show up here, and need their
+ // size extracted.
+
+ // Normal .length() would have been constant folded by the front-end.
+ // So, this has to be block.lastMember.length().
+ // SPV wants "block" and member number as the operands, go get them.
+
+ spv::Id length;
+ if (node->getOperand()->getType().isCoopMat()) {
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ spv::Id typeId = convertGlslangToSpvType(node->getOperand()->getType());
+ assert(builder.isCooperativeMatrixType(typeId));
+
+ length = builder.createCooperativeMatrixLength(typeId);
+ } else {
+ glslang::TIntermTyped* block = node->getOperand()->getAsBinaryNode()->getLeft();
+ block->traverse(this);
+ unsigned int member = node->getOperand()->getAsBinaryNode()->getRight()->getAsConstantUnion()->getConstArray()[0].getUConst();
+ length = builder.createArrayLength(builder.accessChainGetLValue(), member);
+ }
+
+ // GLSL semantics say the result of .length() is an int, while SPIR-V says
+ // signedness must be 0. So, convert from SPIR-V unsigned back to GLSL's
+ // AST expectation of a signed result.
+ if (glslangIntermediate->getSource() == glslang::EShSourceGlsl) {
+ if (builder.isInSpecConstCodeGenMode()) {
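+                // OpBitcast is not available to OpSpecConstantOp under the
+                // Shader capability, so re-type the unsigned length by adding
+                // a signed zero instead.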
+ length = builder.createBinOp(spv::OpIAdd, builder.makeIntType(32), length, builder.makeIntConstant(0));
+ } else {
+ length = builder.createUnaryOp(spv::OpBitcast, builder.makeIntType(32), length);
+ }
+ }
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(length);
+
+ return false;
+ }
+
+ // Start by evaluating the operand
+
+ // Does it need a swizzle inversion? If so, evaluation is inverted;
+ // operate first on the swizzle base, then apply the swizzle.
+ spv::Id invertedType = spv::NoType;
+ auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? invertedType : convertGlslangToSpvType(node->getType()); };
+ if (node->getOp() == glslang::EOpInterpolateAtCentroid)
+ invertedType = getInvertedSwizzleType(*node->getOperand());
+
+ builder.clearAccessChain();
+ if (invertedType != spv::NoType)
+ node->getOperand()->getAsBinaryNode()->getLeft()->traverse(this);
+ else
+ node->getOperand()->traverse(this);
+
+ spv::Id operand = spv::NoResult;
+
+ if (node->getOp() == glslang::EOpAtomicCounterIncrement ||
+ node->getOp() == glslang::EOpAtomicCounterDecrement ||
+ node->getOp() == glslang::EOpAtomicCounter ||
+ node->getOp() == glslang::EOpInterpolateAtCentroid)
+ operand = builder.accessChainGetLValue(); // Special case l-value operands
+ else
+ operand = accessChainLoad(node->getOperand()->getType());
+
+ OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+
+ // it could be a conversion
+ if (! result)
+ result = createConversion(node->getOp(), decorations, resultType(), operand, node->getOperand()->getBasicType());
+
+ // if not, then possibly an operation
+ if (! result)
+ result = createUnaryOperation(node->getOp(), decorations, resultType(), operand, node->getOperand()->getBasicType());
+
+ if (result) {
+        if (invertedType != spv::NoType) {
+ result = createInvertedSwizzle(decorations.precision, *node->getOperand(), result);
+ builder.addDecoration(result, decorations.nonUniform);
+ }
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false; // done with this node
+ }
+
+ // it must be a special case, check...
+ switch (node->getOp()) {
+ case glslang::EOpPostIncrement:
+ case glslang::EOpPostDecrement:
+ case glslang::EOpPreIncrement:
+ case glslang::EOpPreDecrement:
+ {
+ // we need the integer value "1" or the floating point "1.0" to add/subtract
+ spv::Id one = 0;
+ if (node->getBasicType() == glslang::EbtFloat)
+ one = builder.makeFloatConstant(1.0F);
+ else if (node->getBasicType() == glslang::EbtDouble)
+ one = builder.makeDoubleConstant(1.0);
+ else if (node->getBasicType() == glslang::EbtFloat16)
+ one = builder.makeFloat16Constant(1.0F);
+ else if (node->getBasicType() == glslang::EbtInt8 || node->getBasicType() == glslang::EbtUint8)
+ one = builder.makeInt8Constant(1);
+ else if (node->getBasicType() == glslang::EbtInt16 || node->getBasicType() == glslang::EbtUint16)
+ one = builder.makeInt16Constant(1);
+ else if (node->getBasicType() == glslang::EbtInt64 || node->getBasicType() == glslang::EbtUint64)
+ one = builder.makeInt64Constant(1);
+ else
+ one = builder.makeIntConstant(1);
+ glslang::TOperator op;
+ if (node->getOp() == glslang::EOpPreIncrement ||
+ node->getOp() == glslang::EOpPostIncrement)
+ op = glslang::EOpAdd;
+ else
+ op = glslang::EOpSub;
+
+ spv::Id result = createBinaryOperation(op, decorations,
+ convertGlslangToSpvType(node->getType()), operand, one,
+ node->getType().getBasicType());
+ assert(result != spv::NoResult);
+
+            // The result of the operation is always stored back through the
+            // access chain, but the value the expression yields depends on the
+            // operator: pre-increment/decrement yields the new value,
+            // post-increment/decrement yields the original operand. Either
+            // way, the consumed result is an r-value.
+ builder.accessChainStore(result);
+ builder.clearAccessChain();
+ if (node->getOp() == glslang::EOpPreIncrement ||
+ node->getOp() == glslang::EOpPreDecrement)
+ builder.setAccessChainRValue(result);
+ else
+ builder.setAccessChainRValue(operand);
+ }
+
+ return false;
+
+ case glslang::EOpEmitStreamVertex:
+ builder.createNoResultOp(spv::OpEmitStreamVertex, operand);
+ return false;
+ case glslang::EOpEndStreamPrimitive:
+ builder.createNoResultOp(spv::OpEndStreamPrimitive, operand);
+ return false;
+
+ default:
+ logger->missingFunctionality("unknown glslang unary");
+ return true; // pick up operand as placeholder result
+ }
+}
+
+bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TIntermAggregate* node)
+{
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ spv::Id result = spv::NoResult;
+ spv::Id invertedType = spv::NoType; // to use to override the natural type of the node
+ auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? invertedType : convertGlslangToSpvType(node->getType()); };
+
+ // try texturing
+ result = createImageTextureFunctionCall(node);
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false;
+ } else if (node->getOp() == glslang::EOpImageStore ||
+#ifdef AMD_EXTENSIONS
+ node->getOp() == glslang::EOpImageStoreLod ||
+#endif
+ node->getOp() == glslang::EOpImageAtomicStore) {
+ // "imageStore" is a special case, which has no result
+ return false;
+ }
+
+ glslang::TOperator binOp = glslang::EOpNull;
+ bool reduceComparison = true;
+ bool isMatrix = false;
+ bool noReturnValue = false;
+ bool atomic = false;
+
+ assert(node->getOp());
+
+ spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+ switch (node->getOp()) {
+ case glslang::EOpSequence:
+ {
+ if (preVisit)
+ ++sequenceDepth;
+ else
+ --sequenceDepth;
+
+ if (sequenceDepth == 1) {
+ // If this is the parent node of all the functions, we want to see them
+ // early, so all call points have actual SPIR-V functions to reference.
+ // In all cases, still let the traverser visit the children for us.
+ makeFunctions(node->getAsAggregate()->getSequence());
+
+ // Also, we want all globals initializers to go into the beginning of the entry point, before
+ // anything else gets there, so visit out of order, doing them all now.
+ makeGlobalInitializers(node->getAsAggregate()->getSequence());
+
+ // Initializers are done, don't want to visit again, but functions and link objects need to be processed,
+ // so do them manually.
+ visitFunctions(node->getAsAggregate()->getSequence());
+
+ return false;
+ }
+
+ return true;
+ }
+ case glslang::EOpLinkerObjects:
+ {
+ if (visit == glslang::EvPreVisit)
+ linkageOnly = true;
+ else
+ linkageOnly = false;
+
+ return true;
+ }
+ case glslang::EOpComma:
+ {
+        // processing from left to right naturally leaves the right-most
+        // operand's value lying around in the access chain
+ glslang::TIntermSequence& glslangOperands = node->getSequence();
+ for (int i = 0; i < (int)glslangOperands.size(); ++i)
+ glslangOperands[i]->traverse(this);
+
+ return false;
+ }
+ case glslang::EOpFunction:
+ if (visit == glslang::EvPreVisit) {
+ if (isShaderEntryPoint(node)) {
+ inEntryPoint = true;
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ currentFunction = shaderEntry;
+ } else {
+ handleFunctionEntry(node);
+ }
+ } else {
+ if (inEntryPoint)
+ entryPointTerminated = true;
+ builder.leaveFunction();
+ inEntryPoint = false;
+ }
+
+ return true;
+ case glslang::EOpParameters:
+        // Parameters will have been consumed by EOpFunction processing, but not
+        // the body, so we still visit the function node's children, making this
+        // child redundant.
+ return false;
+ case glslang::EOpFunctionCall:
+ {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ if (node->isUserDefined())
+ result = handleUserFunctionCall(node);
+ // assert(result); // this can happen for bad shaders because the call graph completeness checking is not yet done
+ if (result) {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ } else
+ logger->missingFunctionality("missing user function; linker needs to catch that");
+
+ return false;
+ }
+ case glslang::EOpConstructMat2x2:
+ case glslang::EOpConstructMat2x3:
+ case glslang::EOpConstructMat2x4:
+ case glslang::EOpConstructMat3x2:
+ case glslang::EOpConstructMat3x3:
+ case glslang::EOpConstructMat3x4:
+ case glslang::EOpConstructMat4x2:
+ case glslang::EOpConstructMat4x3:
+ case glslang::EOpConstructMat4x4:
+ case glslang::EOpConstructDMat2x2:
+ case glslang::EOpConstructDMat2x3:
+ case glslang::EOpConstructDMat2x4:
+ case glslang::EOpConstructDMat3x2:
+ case glslang::EOpConstructDMat3x3:
+ case glslang::EOpConstructDMat3x4:
+ case glslang::EOpConstructDMat4x2:
+ case glslang::EOpConstructDMat4x3:
+ case glslang::EOpConstructDMat4x4:
+ case glslang::EOpConstructIMat2x2:
+ case glslang::EOpConstructIMat2x3:
+ case glslang::EOpConstructIMat2x4:
+ case glslang::EOpConstructIMat3x2:
+ case glslang::EOpConstructIMat3x3:
+ case glslang::EOpConstructIMat3x4:
+ case glslang::EOpConstructIMat4x2:
+ case glslang::EOpConstructIMat4x3:
+ case glslang::EOpConstructIMat4x4:
+ case glslang::EOpConstructUMat2x2:
+ case glslang::EOpConstructUMat2x3:
+ case glslang::EOpConstructUMat2x4:
+ case glslang::EOpConstructUMat3x2:
+ case glslang::EOpConstructUMat3x3:
+ case glslang::EOpConstructUMat3x4:
+ case glslang::EOpConstructUMat4x2:
+ case glslang::EOpConstructUMat4x3:
+ case glslang::EOpConstructUMat4x4:
+ case glslang::EOpConstructBMat2x2:
+ case glslang::EOpConstructBMat2x3:
+ case glslang::EOpConstructBMat2x4:
+ case glslang::EOpConstructBMat3x2:
+ case glslang::EOpConstructBMat3x3:
+ case glslang::EOpConstructBMat3x4:
+ case glslang::EOpConstructBMat4x2:
+ case glslang::EOpConstructBMat4x3:
+ case glslang::EOpConstructBMat4x4:
+ case glslang::EOpConstructF16Mat2x2:
+ case glslang::EOpConstructF16Mat2x3:
+ case glslang::EOpConstructF16Mat2x4:
+ case glslang::EOpConstructF16Mat3x2:
+ case glslang::EOpConstructF16Mat3x3:
+ case glslang::EOpConstructF16Mat3x4:
+ case glslang::EOpConstructF16Mat4x2:
+ case glslang::EOpConstructF16Mat4x3:
+ case glslang::EOpConstructF16Mat4x4:
+ isMatrix = true;
+ // fall through
+ case glslang::EOpConstructFloat:
+ case glslang::EOpConstructVec2:
+ case glslang::EOpConstructVec3:
+ case glslang::EOpConstructVec4:
+ case glslang::EOpConstructDouble:
+ case glslang::EOpConstructDVec2:
+ case glslang::EOpConstructDVec3:
+ case glslang::EOpConstructDVec4:
+ case glslang::EOpConstructFloat16:
+ case glslang::EOpConstructF16Vec2:
+ case glslang::EOpConstructF16Vec3:
+ case glslang::EOpConstructF16Vec4:
+ case glslang::EOpConstructBool:
+ case glslang::EOpConstructBVec2:
+ case glslang::EOpConstructBVec3:
+ case glslang::EOpConstructBVec4:
+ case glslang::EOpConstructInt8:
+ case glslang::EOpConstructI8Vec2:
+ case glslang::EOpConstructI8Vec3:
+ case glslang::EOpConstructI8Vec4:
+ case glslang::EOpConstructUint8:
+ case glslang::EOpConstructU8Vec2:
+ case glslang::EOpConstructU8Vec3:
+ case glslang::EOpConstructU8Vec4:
+ case glslang::EOpConstructInt16:
+ case glslang::EOpConstructI16Vec2:
+ case glslang::EOpConstructI16Vec3:
+ case glslang::EOpConstructI16Vec4:
+ case glslang::EOpConstructUint16:
+ case glslang::EOpConstructU16Vec2:
+ case glslang::EOpConstructU16Vec3:
+ case glslang::EOpConstructU16Vec4:
+ case glslang::EOpConstructInt:
+ case glslang::EOpConstructIVec2:
+ case glslang::EOpConstructIVec3:
+ case glslang::EOpConstructIVec4:
+ case glslang::EOpConstructUint:
+ case glslang::EOpConstructUVec2:
+ case glslang::EOpConstructUVec3:
+ case glslang::EOpConstructUVec4:
+ case glslang::EOpConstructInt64:
+ case glslang::EOpConstructI64Vec2:
+ case glslang::EOpConstructI64Vec3:
+ case glslang::EOpConstructI64Vec4:
+ case glslang::EOpConstructUint64:
+ case glslang::EOpConstructU64Vec2:
+ case glslang::EOpConstructU64Vec3:
+ case glslang::EOpConstructU64Vec4:
+ case glslang::EOpConstructStruct:
+ case glslang::EOpConstructTextureSampler:
+ case glslang::EOpConstructReference:
+ case glslang::EOpConstructCooperativeMatrix:
+ {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ std::vector<spv::Id> arguments;
+ translateArguments(*node, arguments);
+ spv::Id constructed;
+ if (node->getOp() == glslang::EOpConstructTextureSampler)
+ constructed = builder.createOp(spv::OpSampledImage, resultType(), arguments);
+ else if (node->getOp() == glslang::EOpConstructStruct ||
+ node->getOp() == glslang::EOpConstructCooperativeMatrix ||
+ node->getType().isArray()) {
+ std::vector<spv::Id> constituents;
+ for (int c = 0; c < (int)arguments.size(); ++c)
+ constituents.push_back(arguments[c]);
+ constructed = builder.createCompositeConstruct(resultType(), constituents);
+ } else if (isMatrix)
+ constructed = builder.createMatrixConstructor(precision, arguments, resultType());
+ else
+ constructed = builder.createConstructor(precision, arguments, resultType());
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(constructed);
+
+ return false;
+ }
+
+ // These six are component-wise compares with component-wise results.
+ // Forward on to createBinaryOperation(), requesting a vector result.
+ case glslang::EOpLessThan:
+ case glslang::EOpGreaterThan:
+ case glslang::EOpLessThanEqual:
+ case glslang::EOpGreaterThanEqual:
+ case glslang::EOpVectorEqual:
+ case glslang::EOpVectorNotEqual:
+ {
+ // Map the operation to a binary
+ binOp = node->getOp();
+ reduceComparison = false;
+
+ break;
+ }
+ case glslang::EOpMul:
+ // component-wise matrix multiply
+ binOp = glslang::EOpMul;
+ break;
+ case glslang::EOpOuterProduct:
+ // two vectors multiplied to make a matrix
+ binOp = glslang::EOpOuterProduct;
+ break;
+ case glslang::EOpDot:
+ {
+ // for scalar dot product, use multiply
+ glslang::TIntermSequence& glslangOperands = node->getSequence();
+ if (glslangOperands[0]->getAsTyped()->getVectorSize() == 1)
+ binOp = glslang::EOpMul;
+ break;
+ }
+ case glslang::EOpMod:
+ // when an aggregate, this is the floating-point mod built-in function,
+ // which can be emitted by the one in createBinaryOperation()
+ binOp = glslang::EOpMod;
+ break;
+ case glslang::EOpEmitVertex:
+ case glslang::EOpEndPrimitive:
+ case glslang::EOpBarrier:
+ case glslang::EOpMemoryBarrier:
+ case glslang::EOpMemoryBarrierAtomicCounter:
+ case glslang::EOpMemoryBarrierBuffer:
+ case glslang::EOpMemoryBarrierImage:
+ case glslang::EOpMemoryBarrierShared:
+ case glslang::EOpGroupMemoryBarrier:
+ case glslang::EOpDeviceMemoryBarrier:
+ case glslang::EOpAllMemoryBarrierWithGroupSync:
+ case glslang::EOpDeviceMemoryBarrierWithGroupSync:
+ case glslang::EOpWorkgroupMemoryBarrier:
+ case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+ case glslang::EOpSubgroupBarrier:
+ case glslang::EOpSubgroupMemoryBarrier:
+ case glslang::EOpSubgroupMemoryBarrierBuffer:
+ case glslang::EOpSubgroupMemoryBarrierImage:
+ case glslang::EOpSubgroupMemoryBarrierShared:
+ noReturnValue = true;
+        // These all have no operands and will naturally be handled by the zero-operand path in the code below.
+ break;
+
+ case glslang::EOpAtomicStore:
+ noReturnValue = true;
+ // fallthrough
+ case glslang::EOpAtomicLoad:
+ case glslang::EOpAtomicAdd:
+ case glslang::EOpAtomicMin:
+ case glslang::EOpAtomicMax:
+ case glslang::EOpAtomicAnd:
+ case glslang::EOpAtomicOr:
+ case glslang::EOpAtomicXor:
+ case glslang::EOpAtomicExchange:
+ case glslang::EOpAtomicCompSwap:
+ atomic = true;
+ break;
+
+ case glslang::EOpAtomicCounterAdd:
+ case glslang::EOpAtomicCounterSubtract:
+ case glslang::EOpAtomicCounterMin:
+ case glslang::EOpAtomicCounterMax:
+ case glslang::EOpAtomicCounterAnd:
+ case glslang::EOpAtomicCounterOr:
+ case glslang::EOpAtomicCounterXor:
+ case glslang::EOpAtomicCounterExchange:
+ case glslang::EOpAtomicCounterCompSwap:
+ builder.addExtension("SPV_KHR_shader_atomic_counter_ops");
+ builder.addCapability(spv::CapabilityAtomicStorageOps);
+ atomic = true;
+ break;
+
+#ifdef NV_EXTENSIONS
+ case glslang::EOpIgnoreIntersectionNV:
+ case glslang::EOpTerminateRayNV:
+ case glslang::EOpTraceNV:
+ case glslang::EOpExecuteCallableNV:
+ case glslang::EOpWritePackedPrimitiveIndices4x8NV:
+ noReturnValue = true;
+ break;
+#endif
+ case glslang::EOpCooperativeMatrixLoad:
+ case glslang::EOpCooperativeMatrixStore:
+ noReturnValue = true;
+ break;
+
+ default:
+ break;
+ }
+
+ //
+ // See if it maps to a regular operation.
+ //
+ if (binOp != glslang::EOpNull) {
+ glslang::TIntermTyped* left = node->getSequence()[0]->getAsTyped();
+ glslang::TIntermTyped* right = node->getSequence()[1]->getAsTyped();
+ assert(left && right);
+
+ builder.clearAccessChain();
+ left->traverse(this);
+ spv::Id leftId = accessChainLoad(left->getType());
+
+ builder.clearAccessChain();
+ right->traverse(this);
+ spv::Id rightId = accessChainLoad(right->getType());
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ OpDecorations decorations = { precision,
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ result = createBinaryOperation(binOp, decorations,
+ resultType(), leftId, rightId,
+ left->getType().getBasicType(), reduceComparison);
+
+ // code above should only make binOp that exists in createBinaryOperation
+ assert(result != spv::NoResult);
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false;
+ }
+
+ //
+ // Create the list of operands.
+ //
+ glslang::TIntermSequence& glslangOperands = node->getSequence();
+ std::vector<spv::Id> operands;
+ std::vector<spv::IdImmediate> memoryAccessOperands;
+ for (int arg = 0; arg < (int)glslangOperands.size(); ++arg) {
+ // special case l-value operands; there are just a few
+ bool lvalue = false;
+ switch (node->getOp()) {
+ case glslang::EOpFrexp:
+ case glslang::EOpModf:
+ if (arg == 1)
+ lvalue = true;
+ break;
+ case glslang::EOpInterpolateAtSample:
+ case glslang::EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpInterpolateAtVertex:
+#endif
+ if (arg == 0) {
+ lvalue = true;
+
+ // Does it need a swizzle inversion? If so, evaluation is inverted;
+ // operate first on the swizzle base, then apply the swizzle.
+ if (glslangOperands[0]->getAsOperator() &&
+ glslangOperands[0]->getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
+ invertedType = convertGlslangToSpvType(glslangOperands[0]->getAsBinaryNode()->getLeft()->getType());
+ }
+ break;
+ case glslang::EOpAtomicAdd:
+ case glslang::EOpAtomicMin:
+ case glslang::EOpAtomicMax:
+ case glslang::EOpAtomicAnd:
+ case glslang::EOpAtomicOr:
+ case glslang::EOpAtomicXor:
+ case glslang::EOpAtomicExchange:
+ case glslang::EOpAtomicCompSwap:
+ case glslang::EOpAtomicLoad:
+ case glslang::EOpAtomicStore:
+ case glslang::EOpAtomicCounterAdd:
+ case glslang::EOpAtomicCounterSubtract:
+ case glslang::EOpAtomicCounterMin:
+ case glslang::EOpAtomicCounterMax:
+ case glslang::EOpAtomicCounterAnd:
+ case glslang::EOpAtomicCounterOr:
+ case glslang::EOpAtomicCounterXor:
+ case glslang::EOpAtomicCounterExchange:
+ case glslang::EOpAtomicCounterCompSwap:
+ if (arg == 0)
+ lvalue = true;
+ break;
+ case glslang::EOpAddCarry:
+ case glslang::EOpSubBorrow:
+ if (arg == 2)
+ lvalue = true;
+ break;
+ case glslang::EOpUMulExtended:
+ case glslang::EOpIMulExtended:
+ if (arg >= 2)
+ lvalue = true;
+ break;
+ case glslang::EOpCooperativeMatrixLoad:
+ if (arg == 0 || arg == 1)
+ lvalue = true;
+ break;
+ case glslang::EOpCooperativeMatrixStore:
+ if (arg == 1)
+ lvalue = true;
+ break;
+ default:
+ break;
+ }
+ builder.clearAccessChain();
+ if (invertedType != spv::NoType && arg == 0)
+ glslangOperands[0]->getAsBinaryNode()->getLeft()->traverse(this);
+ else
+ glslangOperands[arg]->traverse(this);
+
+ if (node->getOp() == glslang::EOpCooperativeMatrixLoad ||
+ node->getOp() == glslang::EOpCooperativeMatrixStore) {
+
+ if (arg == 1) {
+ // fold "element" parameter into the access chain
+ spv::Builder::AccessChain save = builder.getAccessChain();
+ builder.clearAccessChain();
+ glslangOperands[2]->traverse(this);
+
+ spv::Id elementId = accessChainLoad(glslangOperands[2]->getAsTyped()->getType());
+
+ builder.setAccessChain(save);
+
+ // Point to the first element of the array.
+ builder.accessChainPush(elementId, TranslateCoherent(glslangOperands[arg]->getAsTyped()->getType()),
+ glslangOperands[arg]->getAsTyped()->getType().getBufferReferenceAlignment());
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ unsigned int alignment = builder.getAccessChain().alignment;
+
+ int memoryAccess = TranslateMemoryAccess(coherentFlags);
+ if (node->getOp() == glslang::EOpCooperativeMatrixLoad)
+ memoryAccess &= ~spv::MemoryAccessMakePointerAvailableKHRMask;
+ if (node->getOp() == glslang::EOpCooperativeMatrixStore)
+ memoryAccess &= ~spv::MemoryAccessMakePointerVisibleKHRMask;
+ if (builder.getStorageClass(builder.getAccessChain().base) == spv::StorageClassPhysicalStorageBufferEXT) {
+ memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
+ }
+
+ memoryAccessOperands.push_back(spv::IdImmediate(false, memoryAccess));
+
+ if (memoryAccess & spv::MemoryAccessAlignedMask) {
+ memoryAccessOperands.push_back(spv::IdImmediate(false, alignment));
+ }
+
+ if (memoryAccess & (spv::MemoryAccessMakePointerAvailableKHRMask | spv::MemoryAccessMakePointerVisibleKHRMask)) {
+ memoryAccessOperands.push_back(spv::IdImmediate(true, builder.makeUintConstant(TranslateMemoryScope(coherentFlags))));
+ }
+ } else if (arg == 2) {
+ continue;
+ }
+ }
+
+ if (lvalue)
+ operands.push_back(builder.accessChainGetLValue());
+ else {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType()));
+ }
+ }
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ if (node->getOp() == glslang::EOpCooperativeMatrixLoad) {
+ std::vector<spv::IdImmediate> idImmOps;
+
+ idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf
+ idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride
+ idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor
+ idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end());
+ // get the pointee type
+ spv::Id typeId = builder.getContainedTypeId(builder.getTypeId(operands[0]));
+ assert(builder.isCooperativeMatrixType(typeId));
+ // do the op
+ spv::Id result = builder.createOp(spv::OpCooperativeMatrixLoadNV, typeId, idImmOps);
+ // store the result to the pointer (out param 'm')
+ builder.createStore(result, operands[0]);
+ result = spv::NoResult;
+ } else if (node->getOp() == glslang::EOpCooperativeMatrixStore) {
+ std::vector<spv::IdImmediate> idImmOps;
+
+ idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf
+ idImmOps.push_back(spv::IdImmediate(true, operands[0])); // object
+ idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride
+ idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor
+ idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end());
+
+ builder.createNoResultOp(spv::OpCooperativeMatrixStoreNV, idImmOps);
+ result = spv::NoResult;
+ } else if (atomic) {
+ // Handle all atomics
+ result = createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ } else {
+ // Pass through to generic operations.
+ switch (glslangOperands.size()) {
+ case 0:
+ result = createNoArgOperation(node->getOp(), precision, resultType());
+ break;
+ case 1:
+ {
+ OpDecorations decorations = { precision,
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ result = createUnaryOperation(
+ node->getOp(), decorations,
+ resultType(), operands.front(),
+ glslangOperands[0]->getAsTyped()->getBasicType());
+ }
+ break;
+ default:
+ result = createMiscOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ break;
+ }
+ if (invertedType)
+ result = createInvertedSwizzle(precision, *glslangOperands[0]->getAsBinaryNode(), result);
+ }
+
+ if (noReturnValue)
+ return false;
+
+ if (! result) {
+ logger->missingFunctionality("unknown glslang aggregate");
+ return true; // pick up a child as a placeholder operand
+ } else {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ return false;
+ }
+}
+
+// This path handles both if-then-else and ?:
+// The if-then-else has a node type of void, while
+// ?: has either a void or a non-void node type
+//
+// Leaving the result, when not void:
+ // GLSL only has r-values as the result of a ?:, but
+// if we have an l-value, that can be more efficient if it will
+// become the base of a complex r-value expression, because the
+// next layer copies r-values into memory to use the access-chain mechanism
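+ //
+ // Illustrative example (editor's note, not upstream code): a ternary whose
+ // operands are simple symbols, such as
+ //     float r = cond ? a : b;
+ // can have both sides evaluated and folded into a single OpSelect, roughly
+ //     %r = OpSelect %float %cond %a %b
+ // while a ternary with side effects, e.g. cond ? f() : g(), must fall back
+ // to real control flow so that only one side executes.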
+bool TGlslangToSpvTraverser::visitSelection(glslang::TVisit /* visit */, glslang::TIntermSelection* node)
+{
+ // See if it is simple and safe, or required, to execute both sides.
+ // Crucially, side effects must be either semantically required or avoided,
+ // and there are performance trade-offs.
+ // Return true if required or a good idea (and safe) to execute both sides,
+ // false otherwise.
+ const auto bothSidesPolicy = [&]() -> bool {
+ // do we have both sides?
+ if (node->getTrueBlock() == nullptr ||
+ node->getFalseBlock() == nullptr)
+ return false;
+
+ // required? (unless we write additional code to look for side effects
+ // and make performance trade-offs if none are present)
+ if (!node->getShortCircuit())
+ return true;
+
+ // if not required to execute both, decide based on performance/practicality...
+
+ // see if OpSelect can handle it
+ if ((!node->getType().isScalar() && !node->getType().isVector()) ||
+ node->getBasicType() == glslang::EbtVoid)
+ return false;
+
+ assert(node->getType() == node->getTrueBlock() ->getAsTyped()->getType() &&
+ node->getType() == node->getFalseBlock()->getAsTyped()->getType());
+
+ // return true if a single operand to ? : is okay for OpSelect
+ const auto operandOkay = [](glslang::TIntermTyped* node) {
+ return node->getAsSymbolNode() || node->getType().getQualifier().isConstant();
+ };
+
+ return operandOkay(node->getTrueBlock() ->getAsTyped()) &&
+ operandOkay(node->getFalseBlock()->getAsTyped());
+ };
+
+ spv::Id result = spv::NoResult; // upcoming result selecting between trueValue and falseValue
+ // emit the condition before doing anything with selection
+ node->getCondition()->traverse(this);
+ spv::Id condition = accessChainLoad(node->getCondition()->getType());
+
+ // Find a way of executing both sides and selecting the right result.
+ const auto executeBothSides = [&]() -> void {
+ // execute both sides
+ node->getTrueBlock()->traverse(this);
+ spv::Id trueValue = accessChainLoad(node->getTrueBlock()->getAsTyped()->getType());
+ node->getFalseBlock()->traverse(this);
+ spv::Id falseValue = accessChainLoad(node->getFalseBlock()->getAsTyped()->getType());
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ // done if void
+ if (node->getBasicType() == glslang::EbtVoid)
+ return;
+
+ // emit code to select between trueValue and falseValue
+
+ // see if OpSelect can handle it
+ if (node->getType().isScalar() || node->getType().isVector()) {
+ // Emit OpSelect for this selection.
+
+ // smear condition to vector, if necessary (AST is always scalar)
+ if (builder.isVector(trueValue))
+ condition = builder.smearScalar(spv::NoPrecision, condition,
+ builder.makeVectorType(builder.makeBoolType(),
+ builder.getNumComponents(trueValue)));
+
+ // OpSelect
+ result = builder.createTriOp(spv::OpSelect,
+ convertGlslangToSpvType(node->getType()), condition,
+ trueValue, falseValue);
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ } else {
+ // We need control flow to select the result.
+ // TODO: Once SPIR-V OpSelect allows arbitrary types, eliminate this path.
+ result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+ // make an "if" based on the value created by the condition
+ spv::Builder::If ifBuilder(condition, control, builder);
+
+ // emit the "then" statement
+ builder.createStore(trueValue, result);
+ ifBuilder.makeBeginElse();
+ // emit the "else" statement
+ builder.createStore(falseValue, result);
+
+ // finish off the control flow
+ ifBuilder.makeEndIf();
+
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(result);
+ }
+ };
+
+ // Execute the one side needed, as per the condition
+ const auto executeOneSide = [&]() {
+ // Always emit control flow.
+ if (node->getBasicType() != glslang::EbtVoid)
+ result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+ // make an "if" based on the value created by the condition
+ spv::Builder::If ifBuilder(condition, control, builder);
+
+ // emit the "then" statement
+ if (node->getTrueBlock() != nullptr) {
+ node->getTrueBlock()->traverse(this);
+ if (result != spv::NoResult)
+ builder.createStore(accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()), result);
+ }
+
+ if (node->getFalseBlock() != nullptr) {
+ ifBuilder.makeBeginElse();
+ // emit the "else" statement
+ node->getFalseBlock()->traverse(this);
+ if (result != spv::NoResult)
+ builder.createStore(accessChainLoad(node->getFalseBlock()->getAsTyped()->getType()), result);
+ }
+
+ // finish off the control flow
+ ifBuilder.makeEndIf();
+
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(result);
+ }
+ };
+
+ // Try for OpSelect (or a requirement to execute both sides)
+ if (bothSidesPolicy()) {
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+ executeBothSides();
+ } else
+ executeOneSide();
+
+ return false;
+}
+
+bool TGlslangToSpvTraverser::visitSwitch(glslang::TVisit /* visit */, glslang::TIntermSwitch* node)
+{
+ // emit and get the condition before doing anything with switch
+ node->getCondition()->traverse(this);
+ spv::Id selector = accessChainLoad(node->getCondition()->getAsTyped()->getType());
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSwitchControl(*node);
+
+ // browse the children to sort out code segments
+ int defaultSegment = -1;
+ std::vector<TIntermNode*> codeSegments;
+ glslang::TIntermSequence& sequence = node->getBody()->getSequence();
+ std::vector<int> caseValues;
+ std::vector<int> valueIndexToSegment(sequence.size()); // note: probably not all are used, it is an overestimate
+ for (glslang::TIntermSequence::iterator c = sequence.begin(); c != sequence.end(); ++c) {
+ TIntermNode* child = *c;
+ if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpDefault)
+ defaultSegment = (int)codeSegments.size();
+ else if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpCase) {
+ valueIndexToSegment[caseValues.size()] = (int)codeSegments.size();
+ caseValues.push_back(child->getAsBranchNode()->getExpression()->getAsConstantUnion()->getConstArray()[0].getIConst());
+ } else
+ codeSegments.push_back(child);
+ }
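+
+ // Worked example (illustrative only): for
+ //     switch (s) { case 0: a(); case 1: b(); break; default: c(); }
+ // the loop above fills codeSegments in order with the a() call (segment 0),
+ // the b() call (1), the break (2), and the c() call (3), while
+ // caseValues = {0, 1}, valueIndexToSegment = {0, 1}, and defaultSegment = 3.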
+
+ // handle the case where the last code segment is missing, due to no code
+ // statements between the last case and the end of the switch statement
+ if ((caseValues.size() && (int)codeSegments.size() == valueIndexToSegment[caseValues.size() - 1]) ||
+ (int)codeSegments.size() == defaultSegment)
+ codeSegments.push_back(nullptr);
+
+ // make the switch statement
+ std::vector<spv::Block*> segmentBlocks; // returned: the blocks allocated by the call
+ builder.makeSwitch(selector, control, (int)codeSegments.size(), caseValues, valueIndexToSegment, defaultSegment, segmentBlocks);
+
+ // emit all the code in the segments
+ breakForLoop.push(false);
+ for (unsigned int s = 0; s < codeSegments.size(); ++s) {
+ builder.nextSwitchSegment(segmentBlocks, s);
+ if (codeSegments[s])
+ codeSegments[s]->traverse(this);
+ else
+ builder.addSwitchBreak();
+ }
+ breakForLoop.pop();
+
+ builder.endSwitch(segmentBlocks);
+
+ return false;
+}
+
+void TGlslangToSpvTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node)
+{
+ int nextConst = 0;
+ spv::Id constant = createSpvConstantFromConstUnionArray(node->getType(), node->getConstArray(), nextConst, false);
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(constant);
+}
+
+bool TGlslangToSpvTraverser::visitLoop(glslang::TVisit /* visit */, glslang::TIntermLoop* node)
+{
+ auto blocks = builder.makeNewLoop();
+ builder.createBranch(&blocks.head);
+
+ // Loop control:
+ unsigned int dependencyLength = glslang::TIntermLoop::dependencyInfinite;
+ const spv::LoopControlMask control = TranslateLoopControl(*node, dependencyLength);
+
+ // Spec requires back edges to target header blocks, and every header block
+ // must dominate its merge block. Make a header block first to ensure these
+ // conditions are met. By definition, it will contain OpLoopMerge, followed
+ // by a block-ending branch. But we don't want to put any other body/test
+ // instructions in it, since the body/test may have arbitrary instructions,
+ // including merges of its own.
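+ // Sketch of the block structure built below for the test-first path
+ // (editor's illustration):
+ //     head:     OpLoopMerge %merge %continue ... ; OpBranch %test
+ //     test:     <test code> ; OpBranchConditional %cond %body %merge
+ //     body:     <body code> ; OpBranch %continue
+ //     continue: <terminal code> ; OpBranch %head
+ //     merge:    <code after the loop>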
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ builder.setBuildPoint(&blocks.head);
+ builder.createLoopMerge(&blocks.merge, &blocks.continue_target, control, dependencyLength);
+ if (node->testFirst() && node->getTest()) {
+ spv::Block& test = builder.makeNewBlock();
+ builder.createBranch(&test);
+
+ builder.setBuildPoint(&test);
+ node->getTest()->traverse(this);
+ spv::Id condition = accessChainLoad(node->getTest()->getType());
+ builder.createConditionalBranch(condition, &blocks.body, &blocks.merge);
+
+ builder.setBuildPoint(&blocks.body);
+ breakForLoop.push(true);
+ if (node->getBody())
+ node->getBody()->traverse(this);
+ builder.createBranch(&blocks.continue_target);
+ breakForLoop.pop();
+
+ builder.setBuildPoint(&blocks.continue_target);
+ if (node->getTerminal())
+ node->getTerminal()->traverse(this);
+ builder.createBranch(&blocks.head);
+ } else {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ builder.createBranch(&blocks.body);
+
+ breakForLoop.push(true);
+ builder.setBuildPoint(&blocks.body);
+ if (node->getBody())
+ node->getBody()->traverse(this);
+ builder.createBranch(&blocks.continue_target);
+ breakForLoop.pop();
+
+ builder.setBuildPoint(&blocks.continue_target);
+ if (node->getTerminal())
+ node->getTerminal()->traverse(this);
+ if (node->getTest()) {
+ node->getTest()->traverse(this);
+ spv::Id condition =
+ accessChainLoad(node->getTest()->getType());
+ builder.createConditionalBranch(condition, &blocks.head, &blocks.merge);
+ } else {
+ // TODO: unless there was a break/return/discard instruction
+ // somewhere in the body, this is an infinite loop, so we should
+ // issue a warning.
+ builder.createBranch(&blocks.head);
+ }
+ }
+ builder.setBuildPoint(&blocks.merge);
+ builder.closeLoop();
+ return false;
+}
+
+bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::TIntermBranch* node)
+{
+ if (node->getExpression())
+ node->getExpression()->traverse(this);
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ switch (node->getFlowOp()) {
+ case glslang::EOpKill:
+ builder.makeDiscard();
+ break;
+ case glslang::EOpBreak:
+ if (breakForLoop.top())
+ builder.createLoopExit();
+ else
+ builder.addSwitchBreak();
+ break;
+ case glslang::EOpContinue:
+ builder.createLoopContinue();
+ break;
+ case glslang::EOpReturn:
+ if (node->getExpression()) {
+ const glslang::TType& glslangReturnType = node->getExpression()->getType();
+ spv::Id returnId = accessChainLoad(glslangReturnType);
+ if (builder.getTypeId(returnId) != currentFunction->getReturnType()) {
+ builder.clearAccessChain();
+ spv::Id copyId = builder.createVariable(spv::StorageClassFunction, currentFunction->getReturnType());
+ builder.setAccessChainLValue(copyId);
+ multiTypeStore(glslangReturnType, returnId);
+ returnId = builder.createLoad(copyId);
+ }
+ builder.makeReturn(false, returnId);
+ } else
+ builder.makeReturn(false);
+
+ builder.clearAccessChain();
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ return false;
+}
+
+spv::Id TGlslangToSpvTraverser::createSpvVariable(const glslang::TIntermSymbol* node)
+{
+ // First, filter out constants, which are not SPIR-V variables but
+ // can still have a mapping to a SPIR-V Id.
+ // This includes specialization constants.
+ if (node->getQualifier().isConstant()) {
+ spv::Id result = createSpvConstant(*node);
+ if (result != spv::NoResult)
+ return result;
+ }
+
+ // Now, handle actual variables
+ spv::StorageClass storageClass = TranslateStorageClass(node->getType());
+ spv::Id spvType = convertGlslangToSpvType(node->getType());
+
+ const bool contains16BitType = node->getType().containsBasicType(glslang::EbtFloat16) ||
+ node->getType().containsBasicType(glslang::EbtInt16) ||
+ node->getType().containsBasicType(glslang::EbtUint16);
+ if (contains16BitType) {
+ switch (storageClass) {
+ case spv::StorageClassInput:
+ case spv::StorageClassOutput:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ builder.addCapability(spv::CapabilityStorageInputOutput16);
+ break;
+ case spv::StorageClassPushConstant:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ builder.addCapability(spv::CapabilityStoragePushConstant16);
+ break;
+ case spv::StorageClassUniform:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ if (node->getType().getQualifier().storage == glslang::EvqBuffer)
+ builder.addCapability(spv::CapabilityStorageUniformBufferBlock16);
+ else
+ builder.addCapability(spv::CapabilityStorageUniform16);
+ break;
+ case spv::StorageClassStorageBuffer:
+ case spv::StorageClassPhysicalStorageBufferEXT:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ builder.addCapability(spv::CapabilityStorageUniformBufferBlock16);
+ break;
+ default:
+ break;
+ }
+ }
+
+ const bool contains8BitType = node->getType().containsBasicType(glslang::EbtInt8) ||
+ node->getType().containsBasicType(glslang::EbtUint8);
+ if (contains8BitType) {
+ if (storageClass == spv::StorageClassPushConstant) {
+ builder.addExtension(spv::E_SPV_KHR_8bit_storage);
+ builder.addCapability(spv::CapabilityStoragePushConstant8);
+ } else if (storageClass == spv::StorageClassUniform) {
+ builder.addExtension(spv::E_SPV_KHR_8bit_storage);
+ builder.addCapability(spv::CapabilityUniformAndStorageBuffer8BitAccess);
+ } else if (storageClass == spv::StorageClassStorageBuffer) {
+ builder.addExtension(spv::E_SPV_KHR_8bit_storage);
+ builder.addCapability(spv::CapabilityStorageBuffer8BitAccess);
+ }
+ }
+
+ const char* name = node->getName().c_str();
+ if (glslang::IsAnonymous(name))
+ name = "";
+
+ return builder.createVariable(storageClass, spvType, name);
+}
+
+// Return type Id of the sampled type.
+spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler)
+{
+ switch (sampler.type) {
+ case glslang::EbtFloat: return builder.makeFloatType(32);
+#ifdef AMD_EXTENSIONS
+ case glslang::EbtFloat16:
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float_fetch);
+ builder.addCapability(spv::CapabilityFloat16ImageAMD);
+ return builder.makeFloatType(16);
+#endif
+ case glslang::EbtInt: return builder.makeIntType(32);
+ case glslang::EbtUint: return builder.makeUintType(32);
+ default:
+ assert(0);
+ return builder.makeFloatType(32);
+ }
+}
+
+// If node is a swizzle operation, return the type that should be used if
+// the swizzle base is first consumed by another operation, before the swizzle
+// is applied.
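+ // Example (illustrative only): in interpolateAtOffset(v.zyx, off), the
+ // interpolant operand must be the variable itself, so the operation is done
+ // on v (whose type this function returns) and the .zyx swizzle is applied
+ // to the result afterwards by createInvertedSwizzle().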
+spv::Id TGlslangToSpvTraverser::getInvertedSwizzleType(const glslang::TIntermTyped& node)
+{
+ if (node.getAsOperator() &&
+ node.getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
+ return convertGlslangToSpvType(node.getAsBinaryNode()->getLeft()->getType());
+ else
+ return spv::NoType;
+}
+
+// When inverting a swizzle with a parent op, this function
+// will apply the swizzle operation to a completed parent operation.
+spv::Id TGlslangToSpvTraverser::createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped& node, spv::Id parentResult)
+{
+ std::vector<unsigned> swizzle;
+ convertSwizzle(*node.getAsBinaryNode()->getRight()->getAsAggregate(), swizzle);
+ return builder.createRvalueSwizzle(precision, convertGlslangToSpvType(node.getType()), parentResult, swizzle);
+}
+
+// Convert a glslang AST swizzle node to a swizzle vector for building SPIR-V.
+void TGlslangToSpvTraverser::convertSwizzle(const glslang::TIntermAggregate& node, std::vector<unsigned>& swizzle)
+{
+ const glslang::TIntermSequence& swizzleSequence = node.getSequence();
+ for (int i = 0; i < (int)swizzleSequence.size(); ++i)
+ swizzle.push_back(swizzleSequence[i]->getAsConstantUnion()->getConstArray()[0].getIConst());
+}
+
+// Convert from a glslang type to an SPV type, by calling into a
+// recursive version of this function. This establishes the inherited
+// layout state rooted from the top-level type.
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly)
+{
+ return convertGlslangToSpvType(type, getExplicitLayout(type), type.getQualifier(), false, forwardReferenceOnly);
+}
+
+// Do full recursive conversion of an arbitrary glslang type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangStructToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type,
+ glslang::TLayoutPacking explicitLayout, const glslang::TQualifier& qualifier,
+ bool lastBufferBlockMember, bool forwardReferenceOnly)
+{
+ spv::Id spvType = spv::NoResult;
+
+ switch (type.getBasicType()) {
+ case glslang::EbtVoid:
+ spvType = builder.makeVoidType();
+ assert (! type.isArray());
+ break;
+ case glslang::EbtFloat:
+ spvType = builder.makeFloatType(32);
+ break;
+ case glslang::EbtDouble:
+ spvType = builder.makeFloatType(64);
+ break;
+ case glslang::EbtFloat16:
+ spvType = builder.makeFloatType(16);
+ break;
+ case glslang::EbtBool:
+ // "transparent" bool doesn't exist in SPIR-V. The GLSL convention is
+ // a 32-bit int where non-0 means true.
+ if (explicitLayout != glslang::ElpNone)
+ spvType = builder.makeUintType(32);
+ else
+ spvType = builder.makeBoolType();
+ break;
+ case glslang::EbtInt8:
+ spvType = builder.makeIntType(8);
+ break;
+ case glslang::EbtUint8:
+ spvType = builder.makeUintType(8);
+ break;
+ case glslang::EbtInt16:
+ spvType = builder.makeIntType(16);
+ break;
+ case glslang::EbtUint16:
+ spvType = builder.makeUintType(16);
+ break;
+ case glslang::EbtInt:
+ spvType = builder.makeIntType(32);
+ break;
+ case glslang::EbtUint:
+ spvType = builder.makeUintType(32);
+ break;
+ case glslang::EbtInt64:
+ spvType = builder.makeIntType(64);
+ break;
+ case glslang::EbtUint64:
+ spvType = builder.makeUintType(64);
+ break;
+ case glslang::EbtAtomicUint:
+ builder.addCapability(spv::CapabilityAtomicStorage);
+ spvType = builder.makeUintType(32);
+ break;
+#ifdef NV_EXTENSIONS
+ case glslang::EbtAccStructNV:
+ spvType = builder.makeAccelerationStructureNVType();
+ break;
+#endif
+ case glslang::EbtSampler:
+ {
+ const glslang::TSampler& sampler = type.getSampler();
+ if (sampler.sampler) {
+ // pure sampler
+ spvType = builder.makeSamplerType();
+ } else {
+ // an image is present, make its type
+ spvType = builder.makeImageType(getSampledType(sampler), TranslateDimensionality(sampler), sampler.shadow, sampler.arrayed, sampler.ms,
+ sampler.image ? 2 : 1, TranslateImageFormat(type));
+ if (sampler.combined) {
+ // already has both image and sampler, make the combined type
+ spvType = builder.makeSampledImageType(spvType);
+ }
+ }
+ }
+ break;
+ case glslang::EbtStruct:
+ case glslang::EbtBlock:
+ {
+ // If we've seen this struct type, return it
+ const glslang::TTypeList* glslangMembers = type.getStruct();
+
+ // Try to share structs for different layouts, but not yet for other
+ // kinds of qualification (primarily not yet including interpolant qualification).
+ if (! HasNonLayoutQualifiers(type, qualifier))
+ spvType = structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers];
+ if (spvType != spv::NoResult)
+ break;
+
+ // else, we haven't seen it...
+ if (type.getBasicType() == glslang::EbtBlock)
+ memberRemapper[glslangMembers].resize(glslangMembers->size());
+ spvType = convertGlslangStructToSpvType(type, glslangMembers, explicitLayout, qualifier);
+ }
+ break;
+ case glslang::EbtReference:
+ {
+ // Make the forward pointer, then recurse to convert the structure type, then
+ // patch up the forward pointer with a real pointer type.
+ if (forwardPointers.find(type.getReferentType()) == forwardPointers.end()) {
+ spv::Id forwardId = builder.makeForwardPointer(spv::StorageClassPhysicalStorageBufferEXT);
+ forwardPointers[type.getReferentType()] = forwardId;
+ }
+ spvType = forwardPointers[type.getReferentType()];
+ if (!forwardReferenceOnly) {
+ spv::Id referentType = convertGlslangToSpvType(*type.getReferentType());
+ builder.makePointerFromForwardPointer(spv::StorageClassPhysicalStorageBufferEXT,
+ forwardPointers[type.getReferentType()],
+ referentType);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (type.isMatrix())
+ spvType = builder.makeMatrixType(spvType, type.getMatrixCols(), type.getMatrixRows());
+ else {
+ // If this variable has a vector element count greater than 1, create a SPIR-V vector
+ if (type.getVectorSize() > 1)
+ spvType = builder.makeVectorType(spvType, type.getVectorSize());
+ }
+
+ if (type.isCoopMat()) {
+ builder.addCapability(spv::CapabilityCooperativeMatrixNV);
+ builder.addExtension(spv::E_SPV_NV_cooperative_matrix);
+ if (type.getBasicType() == glslang::EbtFloat16)
+ builder.addCapability(spv::CapabilityFloat16);
+
+ spv::Id scope = makeArraySizeId(*type.getTypeParameters(), 1);
+ spv::Id rows = makeArraySizeId(*type.getTypeParameters(), 2);
+ spv::Id cols = makeArraySizeId(*type.getTypeParameters(), 3);
+
+ spvType = builder.makeCooperativeMatrixType(spvType, scope, rows, cols);
+ }
+
+ if (type.isArray()) {
+ int stride = 0; // keep this 0 unless doing an explicit layout; 0 will mean no decoration, no stride
+
+ // Do all but the outer dimension
+ if (type.getArraySizes()->getNumDims() > 1) {
+ // We need to decorate array strides for types needing explicit layout, except blocks.
+ if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock) {
+ // Use a dummy glslang type for querying internal strides of
+ // arrays of arrays, but using just a one-dimensional array.
+ glslang::TType simpleArrayType(type, 0); // dereference type of the array
+ while (simpleArrayType.getArraySizes()->getNumDims() > 1)
+ simpleArrayType.getArraySizes()->dereference();
+
+ // Will compute the higher-order strides here, rather than making a whole
+ // pile of types and doing repetitive recursion on their contents.
+ stride = getArrayStride(simpleArrayType, explicitLayout, qualifier.layoutMatrix);
+ }
+
+ // make the arrays
+ for (int dim = type.getArraySizes()->getNumDims() - 1; dim > 0; --dim) {
+ spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), dim), stride);
+ if (stride > 0)
+ builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+ stride *= type.getArraySizes()->getDimSize(dim);
+ }
+ } else {
+ // single-dimensional array; we don't yet have its stride
+
+ // We need to decorate array strides for types needing explicit layout, except blocks.
+ if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock)
+ stride = getArrayStride(type, explicitLayout, qualifier.layoutMatrix);
+ }
+
+ // Do the outer dimension, which might not be known for a runtime-sized array.
+ // (Unsized arrays that survive through linking will be runtime-sized arrays)
+ if (type.isSizedArray())
+ spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), 0), stride);
+ else {
+ if (!lastBufferBlockMember) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityRuntimeDescriptorArrayEXT);
+ }
+ spvType = builder.makeRuntimeArray(spvType);
+ }
+ if (stride > 0)
+ builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+ }
+
+ return spvType;
+}
+
+// TODO: this functionality should exist at a higher level, in creating the AST
+//
+// Identify interface members that don't have their required extension turned on.
+//
+bool TGlslangToSpvTraverser::filterMember(const glslang::TType& member)
+{
+#ifdef NV_EXTENSIONS
+ auto& extensions = glslangIntermediate->getRequestedExtensions();
+
+ if (member.getFieldName() == "gl_SecondaryViewportMaskNV" &&
+ extensions.find("GL_NV_stereo_view_rendering") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_SecondaryPositionNV" &&
+ extensions.find("GL_NV_stereo_view_rendering") == extensions.end())
+ return true;
+
+ if (glslangIntermediate->getStage() != EShLangMeshNV) {
+ if (member.getFieldName() == "gl_ViewportMask" &&
+ extensions.find("GL_NV_viewport_array2") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_PositionPerViewNV" &&
+ extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_ViewportMaskPerViewNV" &&
+ extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end())
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+// Do full recursive conversion of a glslang structure (or block) type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangStructToSpvType(const glslang::TType& type,
+ const glslang::TTypeList* glslangMembers,
+ glslang::TLayoutPacking explicitLayout,
+ const glslang::TQualifier& qualifier)
+{
+ // Create a vector of struct types for SPIR-V to consume
+ std::vector<spv::Id> spvMembers;
+ int memberDelta = 0; // how much the member's index changes from glslang to SPIR-V, normally 0, except sometimes for blocks
+ std::vector<std::pair<glslang::TType*, glslang::TQualifier> > deferredForwardPointers;
+ for (int i = 0; i < (int)glslangMembers->size(); i++) {
+ glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+ if (glslangMember.hiddenMember()) {
+ ++memberDelta;
+ if (type.getBasicType() == glslang::EbtBlock)
+ memberRemapper[glslangMembers][i] = -1;
+ } else {
+ if (type.getBasicType() == glslang::EbtBlock) {
+ memberRemapper[glslangMembers][i] = i - memberDelta;
+ if (filterMember(glslangMember))
+ continue;
+ }
+ // modify just this child's view of the qualifier
+ glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+ InheritQualifiers(memberQualifier, qualifier);
+
+ // manually inherit location
+ if (! memberQualifier.hasLocation() && qualifier.hasLocation())
+ memberQualifier.layoutLocation = qualifier.layoutLocation;
+
+ // recurse
+ bool lastBufferBlockMember = qualifier.storage == glslang::EvqBuffer &&
+ i == (int)glslangMembers->size() - 1;
+
+ // Make forward pointers for any pointer members, and create a list of members to
+ // convert to SPIR-V types after creating the struct.
+ if (glslangMember.getBasicType() == glslang::EbtReference) {
+ if (forwardPointers.find(glslangMember.getReferentType()) == forwardPointers.end()) {
+ deferredForwardPointers.push_back(std::make_pair(&glslangMember, memberQualifier));
+ }
+ spvMembers.push_back(
+ convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember, true));
+ } else {
+ spvMembers.push_back(
+ convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember, false));
+ }
+ }
+ }
+
+ // Make the SPIR-V type
+ spv::Id spvType = builder.makeStructType(spvMembers, type.getTypeName().c_str());
+ if (! HasNonLayoutQualifiers(type, qualifier))
+ structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers] = spvType;
+
+ // Decorate it
+ decorateStructType(type, glslangMembers, explicitLayout, qualifier, spvType);
+
+ for (int i = 0; i < (int)deferredForwardPointers.size(); ++i) {
+ auto it = deferredForwardPointers[i];
+ convertGlslangToSpvType(*it.first, explicitLayout, it.second, false);
+ }
+
+ return spvType;
+}
+
+void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
+ const glslang::TTypeList* glslangMembers,
+ glslang::TLayoutPacking explicitLayout,
+ const glslang::TQualifier& qualifier,
+ spv::Id spvType)
+{
+ // Name and decorate the non-hidden members
+ int offset = -1;
+ int locationOffset = 0; // for use within the members of this struct
+ for (int i = 0; i < (int)glslangMembers->size(); i++) {
+ glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+ int member = i;
+ if (type.getBasicType() == glslang::EbtBlock) {
+ member = memberRemapper[glslangMembers][i];
+ if (filterMember(glslangMember))
+ continue;
+ }
+
+ // modify just this child's view of the qualifier
+ glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+ InheritQualifiers(memberQualifier, qualifier);
+
+ // using -1 above to indicate a hidden member
+ if (member < 0)
+ continue;
+
+ builder.addMemberName(spvType, member, glslangMember.getFieldName().c_str());
+ builder.addMemberDecoration(spvType, member,
+ TranslateLayoutDecoration(glslangMember, memberQualifier.layoutMatrix));
+ builder.addMemberDecoration(spvType, member, TranslatePrecisionDecoration(glslangMember));
+ // Add interpolation and auxiliary storage decorations only to
+ // top-level members of Input and Output storage classes
+ if (type.getQualifier().storage == glslang::EvqVaryingIn ||
+ type.getQualifier().storage == glslang::EvqVaryingOut) {
+ if (type.getBasicType() == glslang::EbtBlock ||
+ glslangIntermediate->getSource() == glslang::EShSourceHlsl) {
+ builder.addMemberDecoration(spvType, member, TranslateInterpolationDecoration(memberQualifier));
+ builder.addMemberDecoration(spvType, member, TranslateAuxiliaryStorageDecoration(memberQualifier));
+#ifdef NV_EXTENSIONS
+ addMeshNVDecoration(spvType, member, memberQualifier);
+#endif
+ }
+ }
+ builder.addMemberDecoration(spvType, member, TranslateInvariantDecoration(memberQualifier));
+
+ if (type.getBasicType() == glslang::EbtBlock &&
+ qualifier.storage == glslang::EvqBuffer) {
+ // Add memory decorations only to top-level members of shader storage block
+ std::vector<spv::Decoration> memory;
+ TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel());
+ for (unsigned int i = 0; i < memory.size(); ++i)
+ builder.addMemberDecoration(spvType, member, memory[i]);
+ }
+
+ // Location assignment was already completed correctly by the front end,
+ // just track whether a member needs to be decorated.
+ // Ignore member locations if the container is an array, as that's
+ // ill-specified and decisions have been made to not allow this.
+ if (! type.isArray() && memberQualifier.hasLocation())
+ builder.addMemberDecoration(spvType, member, spv::DecorationLocation, memberQualifier.layoutLocation);
+
+ if (qualifier.hasLocation()) // track for upcoming inheritance
+ locationOffset += glslangIntermediate->computeTypeLocationSize(
+ glslangMember, glslangIntermediate->getStage());
+
+ // component, XFB, others
+ if (glslangMember.getQualifier().hasComponent())
+ builder.addMemberDecoration(spvType, member, spv::DecorationComponent,
+ glslangMember.getQualifier().layoutComponent);
+ if (glslangMember.getQualifier().hasXfbOffset())
+ builder.addMemberDecoration(spvType, member, spv::DecorationOffset,
+ glslangMember.getQualifier().layoutXfbOffset);
+ else if (explicitLayout != glslang::ElpNone) {
+ // figure out what to do with offset, which is accumulating
+ int nextOffset;
+ updateMemberOffset(type, glslangMember, offset, nextOffset, explicitLayout, memberQualifier.layoutMatrix);
+ if (offset >= 0)
+ builder.addMemberDecoration(spvType, member, spv::DecorationOffset, offset);
+ offset = nextOffset;
+ }
+
+ if (glslangMember.isMatrix() && explicitLayout != glslang::ElpNone)
+ builder.addMemberDecoration(spvType, member, spv::DecorationMatrixStride,
+ getMatrixStride(glslangMember, explicitLayout, memberQualifier.layoutMatrix));
+
+ // built-in variable decorations
+ spv::BuiltIn builtIn = TranslateBuiltInDecoration(glslangMember.getQualifier().builtIn, true);
+ if (builtIn != spv::BuiltInMax)
+ builder.addMemberDecoration(spvType, member, spv::DecorationBuiltIn, (int)builtIn);
+
+ // nonuniform
+ builder.addMemberDecoration(spvType, member, TranslateNonUniformDecoration(glslangMember.getQualifier()));
+
+ if (glslangIntermediate->getHlslFunctionality1() && memberQualifier.semanticName != nullptr) {
+ builder.addExtension("SPV_GOOGLE_hlsl_functionality1");
+ builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE,
+ memberQualifier.semanticName);
+ }
+
+#ifdef NV_EXTENSIONS
+ if (builtIn == spv::BuiltInLayer) {
+ // SPV_NV_viewport_array2 extension
+ if (glslangMember.getQualifier().layoutViewportRelative){
+ builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationViewportRelativeNV);
+ builder.addCapability(spv::CapabilityShaderViewportMaskNV);
+ builder.addExtension(spv::E_SPV_NV_viewport_array2);
+ }
+ if (glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset != -2048){
+ builder.addMemberDecoration(spvType, member,
+ (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV,
+ glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ }
+ }
+ if (glslangMember.getQualifier().layoutPassthrough) {
+ builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationPassthroughNV);
+ builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV);
+ builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough);
+ }
+#endif
+ }
+
+ // Decorate the structure
+ builder.addDecoration(spvType, TranslateLayoutDecoration(type, qualifier.layoutMatrix));
+ builder.addDecoration(spvType, TranslateBlockDecoration(type, glslangIntermediate->usingStorageBuffer()));
+}
+
+// Turn the expression forming the array size into an id.
+// This is not quite trivial, because of specialization constants.
+// Sometimes, a raw constant is turned into an Id, and sometimes
+// a specialization constant expression is.
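+ // Example (illustrative only):
+ //     layout(constant_id = 7) const int N = 4;
+ //     float a[N];
+ // Dimension 0 of a's type carries a specialization-constant node, so the
+ // node traversal below yields the spec-constant Id instead of a literal.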
+spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arraySizes, int dim)
+{
+ // First, see if this is sized with a node, meaning a specialization constant:
+ glslang::TIntermTyped* specNode = arraySizes.getDimNode(dim);
+ if (specNode != nullptr) {
+ builder.clearAccessChain();
+ specNode->traverse(this);
+ return accessChainLoad(specNode->getAsTyped()->getType());
+ }
+
+ // Otherwise, need a compile-time (front end) size, get it:
+ int size = arraySizes.getDimSize(dim);
+ assert(size > 0);
+ return builder.makeUintConstant(size);
+}
+
+// Wrap the builder's accessChainLoad to:
+// - localize handling of RelaxedPrecision
+// - use the SPIR-V inferred type instead of another conversion of the glslang type
+// (avoids unnecessary work and possible type punning for structures)
+// - do conversion of concrete to abstract type
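+ // Example of the concrete-to-abstract conversion (editor's illustration):
+ // a bool member of a std140 block is laid out as a 32-bit uint, so the load
+ // produces a uint that is converted back to OpTypeBool, roughly
+ //     %u = OpLoad %uint %ptr
+ //     %b = OpINotEqual %bool %u %uint_0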
+spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
+{
+ spv::Id nominalTypeId = builder.accessChainGetInferredType();
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ coherentFlags |= TranslateCoherent(type);
+
+ unsigned int alignment = builder.getAccessChain().alignment;
+ alignment |= type.getBufferReferenceAlignment();
+
+ spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type),
+ TranslateNonUniformDecoration(type.getQualifier()),
+ nominalTypeId,
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask),
+ TranslateMemoryScope(coherentFlags),
+ alignment);
+
+ // Need to convert to abstract types when necessary
+ if (type.getBasicType() == glslang::EbtBool) {
+ if (builder.isScalarType(nominalTypeId)) {
+ // Conversion for bool
+ spv::Id boolType = builder.makeBoolType();
+ if (nominalTypeId != boolType)
+ loadedId = builder.createBinOp(spv::OpINotEqual, boolType, loadedId, builder.makeUintConstant(0));
+ } else if (builder.isVectorType(nominalTypeId)) {
+ // Conversion for bvec
+ int vecSize = builder.getNumTypeComponents(nominalTypeId);
+ spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize);
+ if (nominalTypeId != bvecType)
+ loadedId = builder.createBinOp(spv::OpINotEqual, bvecType, loadedId, makeSmearedConstant(builder.makeUintConstant(0), vecSize));
+ }
+ }
+
+ return loadedId;
+}
+
+// Wrap the builder's accessChainStore to:
+// - do conversion of concrete to abstract type
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
+void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::Id rvalue)
+{
+ // Need to convert to abstract types when necessary
+ if (type.getBasicType() == glslang::EbtBool) {
+ spv::Id nominalTypeId = builder.accessChainGetInferredType();
+
+ if (builder.isScalarType(nominalTypeId)) {
+ // Conversion for bool
+ spv::Id boolType = builder.makeBoolType();
+ if (nominalTypeId != boolType) {
+ // keep these outside arguments, for deterministic order-of-evaluation
+ spv::Id one = builder.makeUintConstant(1);
+ spv::Id zero = builder.makeUintConstant(0);
+ rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero);
+ } else if (builder.getTypeId(rvalue) != boolType)
+ rvalue = builder.createBinOp(spv::OpINotEqual, boolType, rvalue, builder.makeUintConstant(0));
+ } else if (builder.isVectorType(nominalTypeId)) {
+ // Conversion for bvec
+ int vecSize = builder.getNumTypeComponents(nominalTypeId);
+ spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize);
+ if (nominalTypeId != bvecType) {
+ // keep these outside arguments, for deterministic order-of-evaluation
+ spv::Id one = makeSmearedConstant(builder.makeUintConstant(1), vecSize);
+ spv::Id zero = makeSmearedConstant(builder.makeUintConstant(0), vecSize);
+ rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero);
+ } else if (builder.getTypeId(rvalue) != bvecType)
+ rvalue = builder.createBinOp(spv::OpINotEqual, bvecType, rvalue,
+ makeSmearedConstant(builder.makeUintConstant(0), vecSize));
+ }
+ }
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ coherentFlags |= TranslateCoherent(type);
+
+ unsigned int alignment = builder.getAccessChain().alignment;
+ alignment |= type.getBufferReferenceAlignment();
+
+ builder.accessChainStore(rvalue,
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerVisibleKHRMask),
+ TranslateMemoryScope(coherentFlags), alignment);
+}
+
+ // For storing when types match at the glslang level, but might not match at the
+ // SPIR-V level.
+//
+// This especially happens when a single glslang type expands to multiple
+// SPIR-V types, like a struct that is used in a member-undecorated way as well
+// as in a member-decorated way.
+//
+// NOTE: This function can handle any store request; if it's not special it
+// simplifies to a simple OpStore.
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
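+ // Example (illustrative only): a struct S used both inside a std140 block
+ // (members decorated with Offset) and as a plain local variable produces two
+ // distinct SPIR-V struct types; assigning one to the other cannot be a
+ // single OpStore, so each member is extracted and stored individually.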
+void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id rValue)
+{
+ // we only do the complex path here if it's an aggregate
+ if (! type.isStruct() && ! type.isArray()) {
+ accessChainStore(type, rValue);
+ return;
+ }
+
+ // and, it has to be a case of type aliasing
+ spv::Id rType = builder.getTypeId(rValue);
+ spv::Id lValue = builder.accessChainGetLValue();
+ spv::Id lType = builder.getContainedTypeId(builder.getTypeId(lValue));
+ if (lType == rType) {
+ accessChainStore(type, rValue);
+ return;
+ }
+
+ // Recursively (as needed) copy an aggregate type to a different aggregate type,
+ // where the two types were the same type in GLSL. This requires member
+ // by member copy, recursively.
+
+ // If an array, copy element by element.
+ if (type.isArray()) {
+ glslang::TType glslangElementType(type, 0);
+ spv::Id elementRType = builder.getContainedTypeId(rType);
+ for (int index = 0; index < type.getOuterArraySize(); ++index) {
+ // get the source member
+ spv::Id elementRValue = builder.createCompositeExtract(rValue, elementRType, index);
+
+ // set up the target storage
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(lValue);
+ builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type), type.getBufferReferenceAlignment());
+
+ // store the member
+ multiTypeStore(glslangElementType, elementRValue);
+ }
+ } else {
+ assert(type.isStruct());
+
+ // loop over structure members
+ const glslang::TTypeList& members = *type.getStruct();
+ for (int m = 0; m < (int)members.size(); ++m) {
+ const glslang::TType& glslangMemberType = *members[m].type;
+
+ // get the source member
+ spv::Id memberRType = builder.getContainedTypeId(rType, m);
+ spv::Id memberRValue = builder.createCompositeExtract(rValue, memberRType, m);
+
+ // set up the target storage
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(lValue);
+ builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type), type.getBufferReferenceAlignment());
+
+ // store the member
+ multiTypeStore(glslangMemberType, memberRValue);
+ }
+ }
+}
+
+// Decide whether or not this type should be
+// decorated with offsets and strides, and if so
+ // whether std140, std430, or scalar rules should be applied.
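+ // Example (illustrative only): layout(std430) buffer B { ... }; returns
+ // ElpStd430 here, while a non-block declaration such as in vec4 v; returns
+ // ElpNone and gets no offset/stride decorations.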
+glslang::TLayoutPacking TGlslangToSpvTraverser::getExplicitLayout(const glslang::TType& type) const
+{
+ // has to be a block
+ if (type.getBasicType() != glslang::EbtBlock)
+ return glslang::ElpNone;
+
+ // has to be a uniform block, a buffer block, or a task in/out block
+ if (type.getQualifier().storage != glslang::EvqUniform &&
+ type.getQualifier().storage != glslang::EvqBuffer &&
+ !type.getQualifier().isTaskMemory())
+ return glslang::ElpNone;
+
+ // return the layout to use
+ switch (type.getQualifier().layoutPacking) {
+ case glslang::ElpStd140:
+ case glslang::ElpStd430:
+ case glslang::ElpScalar:
+ return type.getQualifier().layoutPacking;
+ default:
+ return glslang::ElpNone;
+ }
+}
+
+// Given an array type, returns the integer stride required for that array
+int TGlslangToSpvTraverser::getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+ int size;
+ int stride;
+ glslangIntermediate->getMemberAlignment(arrayType, size, stride, explicitLayout, matrixLayout == glslang::ElmRowMajor);
+
+ return stride;
+}
+
+ // Given a matrix type, or an array (of arrays) of matrices, returns the integer stride required for that matrix
+// when used as a member of an interface block
+int TGlslangToSpvTraverser::getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+ glslang::TType elementType;
+ elementType.shallowCopy(matrixType);
+ elementType.clearArraySizes();
+
+ int size;
+ int stride;
+ glslangIntermediate->getMemberAlignment(elementType, size, stride, explicitLayout, matrixLayout == glslang::ElmRowMajor);
+
+ return stride;
+}
+
+// Given a member type of a struct, realign the current offset for it, and compute
+// the next (not yet aligned) offset for the next member, which will get aligned
+// on the next call.
+// 'currentOffset' should be passed in already initialized, ready to modify, and reflecting
+// the migration of data from nextOffset -> currentOffset. It should be -1 on the first call.
+// -1 means a non-forced member offset (no decoration needed).
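+ // Worked std140 example (editor's illustration):
+ //     layout(std140) uniform U { float f; vec3 v; float g; };
+ // f: offset 0, nextOffset 4; v: base alignment 16 rounds 4 up to 16, so
+ // offset 16, nextOffset 28; g: alignment 4, so offset 28.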
+void TGlslangToSpvTraverser::updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset, int& nextOffset,
+ glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+ // this will get a positive value when deemed necessary
+ nextOffset = -1;
+
+ // override anything in currentOffset with user-set offset
+ if (memberType.getQualifier().hasOffset())
+ currentOffset = memberType.getQualifier().layoutOffset;
+
+ // It could be that current linker usage in glslang updated all the layoutOffset,
+ // in which case the following code does not matter. But, that's not quite right
+ // once cross-compilation unit GLSL validation is done, as the original user
+ // settings are needed in layoutOffset, and then the following will come into play.
+
+ if (explicitLayout == glslang::ElpNone) {
+ if (! memberType.getQualifier().hasOffset())
+ currentOffset = -1;
+
+ return;
+ }
+
+ // Getting this far means we need explicit offsets
+ if (currentOffset < 0)
+ currentOffset = 0;
+
+ // Now, currentOffset is valid (either 0, or from a previous nextOffset),
+ // but possibly not yet correctly aligned.
+
+ int memberSize;
+ int dummyStride;
+ int memberAlignment = glslangIntermediate->getMemberAlignment(memberType, memberSize, dummyStride, explicitLayout, matrixLayout == glslang::ElmRowMajor);
+
+ // Adjust alignment for HLSL rules
+ // TODO: make this consistent in early phases of code:
+ // adjusting this late means inconsistencies with earlier code, which for reflection is an issue
+ // Until reflection is brought in sync with these adjustments, don't apply to $Global,
+ // which is the most likely to rely on reflection, and least likely to rely on implicit layouts
+ if (glslangIntermediate->usingHlslOffsets() &&
+ ! memberType.isArray() && memberType.isVector() && structType.getTypeName().compare("$Global") != 0) {
+ int dummySize;
+ int componentAlignment = glslangIntermediate->getBaseAlignmentScalar(memberType, dummySize);
+ if (componentAlignment <= 4)
+ memberAlignment = componentAlignment;
+ }
+
+ // Bump up to member alignment
+ glslang::RoundToPow2(currentOffset, memberAlignment);
+
+ // Bump up to vec4 if there is a bad straddle
+ if (explicitLayout != glslang::ElpScalar && glslangIntermediate->improperStraddle(memberType, memberSize, currentOffset))
+ glslang::RoundToPow2(currentOffset, 16);
+
+ nextOffset = currentOffset + memberSize;
+}
+
+void TGlslangToSpvTraverser::declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember)
+{
+ const glslang::TBuiltInVariable glslangBuiltIn = members[glslangMember].type->getQualifier().builtIn;
+ switch (glslangBuiltIn)
+ {
+ case glslang::EbvClipDistance:
+ case glslang::EbvCullDistance:
+ case glslang::EbvPointSize:
+#ifdef NV_EXTENSIONS
+ case glslang::EbvViewportMaskNV:
+ case glslang::EbvSecondaryPositionNV:
+ case glslang::EbvSecondaryViewportMaskNV:
+ case glslang::EbvPositionPerViewNV:
+ case glslang::EbvViewportMaskPerViewNV:
+ case glslang::EbvTaskCountNV:
+ case glslang::EbvPrimitiveCountNV:
+ case glslang::EbvPrimitiveIndicesNV:
+ case glslang::EbvClipDistancePerViewNV:
+ case glslang::EbvCullDistancePerViewNV:
+ case glslang::EbvLayerPerViewNV:
+ case glslang::EbvMeshViewCountNV:
+ case glslang::EbvMeshViewIndicesNV:
+#endif
+ // Generate the associated capability. Delegate to TranslateBuiltInDecoration.
+ // Alternately, we could just call this for any glslang built-in, since the
+ // capability already guards against duplicates.
+ TranslateBuiltInDecoration(glslangBuiltIn, false);
+ break;
+ default:
+ // Capabilities were already generated when the struct was declared.
+ break;
+ }
+}
+
+bool TGlslangToSpvTraverser::isShaderEntryPoint(const glslang::TIntermAggregate* node)
+{
+ return node->getName().compare(glslangIntermediate->getEntryPointMangledName().c_str()) == 0;
+}
+
+// Does parameter need a place to keep writes, separate from the original?
+ // Assumes it is called after originalParam(), which filters out block/buffer/opaque-based
+ // parameters, so only in/out/inout/constreadonly should remain here.
+bool TGlslangToSpvTraverser::writableParam(glslang::TStorageQualifier qualifier) const
+{
+ assert(qualifier == glslang::EvqIn ||
+ qualifier == glslang::EvqOut ||
+ qualifier == glslang::EvqInOut ||
+ qualifier == glslang::EvqConstReadOnly);
+ return qualifier != glslang::EvqConstReadOnly;
+}
+
+// Is parameter pass-by-original?
+bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier, const glslang::TType& paramType,
+ bool implicitThisParam)
+{
+ if (implicitThisParam) // implicit this
+ return true;
+ if (glslangIntermediate->getSource() == glslang::EShSourceHlsl)
+ return paramType.getBasicType() == glslang::EbtBlock;
+ return paramType.containsOpaque() || // sampler, etc.
+ (paramType.getBasicType() == glslang::EbtBlock && qualifier == glslang::EvqBuffer); // SSBO
+}
+
+// Make all the functions, skeletally, without actually visiting their bodies.
+void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+ const auto getParamDecorations = [&](std::vector<spv::Decoration>& decorations, const glslang::TType& type, bool useVulkanMemoryModel) {
+ spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
+ if (paramPrecision != spv::NoPrecision)
+ decorations.push_back(paramPrecision);
+ TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
+ if (type.getBasicType() == glslang::EbtReference) {
+ // Original and non-writable params pass the pointer directly and
+ // use restrict/aliased, others are stored to a pointer in Function
+ // memory and use RestrictPointer/AliasedPointer.
+ if (originalParam(type.getQualifier().storage, type, false) ||
+ !writableParam(type.getQualifier().storage)) {
+ decorations.push_back(type.getQualifier().restrict ? spv::DecorationRestrict : spv::DecorationAliased);
+ } else {
+ decorations.push_back(type.getQualifier().restrict ? spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT);
+ }
+ }
+ };
+
+ for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+ glslang::TIntermAggregate* glslFunction = glslFunctions[f]->getAsAggregate();
+ if (! glslFunction || glslFunction->getOp() != glslang::EOpFunction || isShaderEntryPoint(glslFunction))
+ continue;
+
+ // We're on a user function. Set up the basic interface for the function now,
+ // so that it's available to call. Translating the body will happen later.
+ //
+ // Typically (except for a "const in" parameter), an address will be passed to the
+ // function. What it is an address of varies:
+ //
+ // - "in" parameters not marked as "const" can be written to without modifying the calling
+ // argument so that write needs to be to a copy, hence the address of a copy works.
+ //
+ // - "const in" parameters can just be the r-value, as no writes need occur.
+ //
+ // - "out" and "inout" arguments can't be done as pointers to the calling argument, because
+ // GLSL has copy-in/copy-out semantics. They can, however, be handled with a pointer to a copy.
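+
+ // Example (illustrative only): void f(inout vec4 v) becomes a SPIR-V
+ // function whose parameter is a pointer in the Function storage class;
+ // the caller copies the argument into that pointer before the call and
+ // copies the result back out afterwards, matching GLSL's
+ // copy-in/copy-out semantics.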
+
+ std::vector<spv::Id> paramTypes;
+ std::vector<std::vector<spv::Decoration>> paramDecorations; // list of decorations per parameter
+ glslang::TIntermSequence& parameters = glslFunction->getSequence()[0]->getAsAggregate()->getSequence();
+
+ bool implicitThis = (int)parameters.size() > 0 && parameters[0]->getAsSymbolNode()->getName() ==
+ glslangIntermediate->implicitThisName;
+
+ paramDecorations.resize(parameters.size());
+ for (int p = 0; p < (int)parameters.size(); ++p) {
+ const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+ spv::Id typeId = convertGlslangToSpvType(paramType);
+ if (originalParam(paramType.getQualifier().storage, paramType, implicitThis && p == 0))
+ typeId = builder.makePointer(TranslateStorageClass(paramType), typeId);
+ else if (writableParam(paramType.getQualifier().storage))
+ typeId = builder.makePointer(spv::StorageClassFunction, typeId);
+ else
+ rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
+ getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
+ paramTypes.push_back(typeId);
+ }
+
+ spv::Block* functionBlock;
+ spv::Function *function = builder.makeFunctionEntry(TranslatePrecisionDecoration(glslFunction->getType()),
+ convertGlslangToSpvType(glslFunction->getType()),
+ glslFunction->getName().c_str(), paramTypes,
+ paramDecorations, &functionBlock);
+ if (implicitThis)
+ function->setImplicitThis();
+
+ // Track function to emit/call later
+ functionMap[glslFunction->getName().c_str()] = function;
+
+        // Set the parameter ids
+ for (int p = 0; p < (int)parameters.size(); ++p) {
+ symbolValues[parameters[p]->getAsSymbolNode()->getId()] = function->getParamId(p);
+ // give a name too
+ builder.addName(function->getParamId(p), parameters[p]->getAsSymbolNode()->getName().c_str());
+ }
+ }
+}
+
+// Process all the initializers, while skipping the functions and link objects
+void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequence& initializers)
+{
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ for (int i = 0; i < (int)initializers.size(); ++i) {
+ glslang::TIntermAggregate* initializer = initializers[i]->getAsAggregate();
+ if (initializer && initializer->getOp() != glslang::EOpFunction && initializer->getOp() != glslang::EOpLinkerObjects) {
+
+ // We're on a top-level node that's not a function. Treat as an initializer, whose
+ // code goes into the beginning of the entry point.
+ initializer->traverse(this);
+ }
+ }
+}
+
+// Process all the functions and linker objects, while skipping initializers.
+void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+ for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+ glslang::TIntermAggregate* node = glslFunctions[f]->getAsAggregate();
+ if (node && (node->getOp() == glslang::EOpFunction || node->getOp() == glslang::EOpLinkerObjects))
+ node->traverse(this);
+ }
+}
+
+void TGlslangToSpvTraverser::handleFunctionEntry(const glslang::TIntermAggregate* node)
+{
+ // SPIR-V functions should already be in the functionMap from the prepass
+ // that called makeFunctions().
+ currentFunction = functionMap[node->getName().c_str()];
+ spv::Block* functionBlock = currentFunction->getEntryBlock();
+ builder.setBuildPoint(functionBlock);
+}
+
+void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments)
+{
+ const glslang::TIntermSequence& glslangArguments = node.getSequence();
+
+ glslang::TSampler sampler = {};
+ bool cubeCompare = false;
+#ifdef AMD_EXTENSIONS
+ bool f16ShadowCompare = false;
+#endif
+ if (node.isTexture() || node.isImage()) {
+ sampler = glslangArguments[0]->getAsTyped()->getType().getSampler();
+ cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+#ifdef AMD_EXTENSIONS
+ f16ShadowCompare = sampler.shadow && glslangArguments[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16;
+#endif
+ }
+
+ for (int i = 0; i < (int)glslangArguments.size(); ++i) {
+ builder.clearAccessChain();
+ glslangArguments[i]->traverse(this);
+
+ // Special case l-value operands
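+        // (These operands must reach the instruction as pointers rather than
+        // loaded values: the image operand of the image atomics, and the
+        // "out" texel parameter of the sparse-texture family, which is
+        // written through rather than read.)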
+ bool lvalue = false;
+ switch (node.getOp()) {
+ case glslang::EOpImageAtomicAdd:
+ case glslang::EOpImageAtomicMin:
+ case glslang::EOpImageAtomicMax:
+ case glslang::EOpImageAtomicAnd:
+ case glslang::EOpImageAtomicOr:
+ case glslang::EOpImageAtomicXor:
+ case glslang::EOpImageAtomicExchange:
+ case glslang::EOpImageAtomicCompSwap:
+ case glslang::EOpImageAtomicLoad:
+ case glslang::EOpImageAtomicStore:
+ if (i == 0)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseImageLoad:
+ if ((sampler.ms && i == 3) || (! sampler.ms && i == 2))
+ lvalue = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSparseTexture:
+ if (((cubeCompare || f16ShadowCompare) && i == 3) || (! (cubeCompare || f16ShadowCompare) && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureClamp:
+ if (((cubeCompare || f16ShadowCompare) && i == 4) || (! (cubeCompare || f16ShadowCompare) && i == 3))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureLod:
+ case glslang::EOpSparseTextureOffset:
+ if ((f16ShadowCompare && i == 4) || (! f16ShadowCompare && i == 3))
+ lvalue = true;
+ break;
+#else
+ case glslang::EOpSparseTexture:
+ if ((cubeCompare && i == 3) || (! cubeCompare && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureClamp:
+ if ((cubeCompare && i == 4) || (! cubeCompare && i == 3))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureLod:
+ case glslang::EOpSparseTextureOffset:
+ if (i == 3)
+ lvalue = true;
+ break;
+#endif
+ case glslang::EOpSparseTextureFetch:
+ if ((sampler.dim != glslang::EsdRect && i == 3) || (sampler.dim == glslang::EsdRect && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureFetchOffset:
+ if ((sampler.dim != glslang::EsdRect && i == 4) || (sampler.dim == glslang::EsdRect && i == 3))
+ lvalue = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSparseTextureLodOffset:
+ case glslang::EOpSparseTextureGrad:
+ case glslang::EOpSparseTextureOffsetClamp:
+ if ((f16ShadowCompare && i == 5) || (! f16ShadowCompare && i == 4))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffset:
+ case glslang::EOpSparseTextureGradClamp:
+ if ((f16ShadowCompare && i == 6) || (! f16ShadowCompare && i == 5))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffsetClamp:
+ if ((f16ShadowCompare && i == 7) || (! f16ShadowCompare && i == 6))
+ lvalue = true;
+ break;
+#else
+ case glslang::EOpSparseTextureLodOffset:
+ case glslang::EOpSparseTextureGrad:
+ case glslang::EOpSparseTextureOffsetClamp:
+ if (i == 4)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffset:
+ case glslang::EOpSparseTextureGradClamp:
+ if (i == 5)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffsetClamp:
+ if (i == 6)
+ lvalue = true;
+ break;
+#endif
+ case glslang::EOpSparseTextureGather:
+ if ((sampler.shadow && i == 3) || (! sampler.shadow && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGatherOffset:
+ case glslang::EOpSparseTextureGatherOffsets:
+ if ((sampler.shadow && i == 4) || (! sampler.shadow && i == 3))
+ lvalue = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSparseTextureGatherLod:
+ if (i == 3)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGatherLodOffset:
+ case glslang::EOpSparseTextureGatherLodOffsets:
+ if (i == 4)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseImageLoadLod:
+ if (i == 3)
+ lvalue = true;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case glslang::EOpImageSampleFootprintNV:
+ if (i == 4)
+ lvalue = true;
+ break;
+ case glslang::EOpImageSampleFootprintClampNV:
+ case glslang::EOpImageSampleFootprintLodNV:
+ if (i == 5)
+ lvalue = true;
+ break;
+ case glslang::EOpImageSampleFootprintGradNV:
+ if (i == 6)
+ lvalue = true;
+ break;
+ case glslang::EOpImageSampleFootprintGradClampNV:
+ if (i == 7)
+ lvalue = true;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (lvalue)
+ arguments.push_back(builder.accessChainGetLValue());
+ else
+ arguments.push_back(accessChainLoad(glslangArguments[i]->getAsTyped()->getType()));
+ }
+}
+
+void TGlslangToSpvTraverser::translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments)
+{
+ builder.clearAccessChain();
+ node.getOperand()->traverse(this);
+ arguments.push_back(accessChainLoad(node.getOperand()->getType()));
+}
+
+spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermOperator* node)
+{
+ if (! node->isImage() && ! node->isTexture())
+ return spv::NoResult;
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ // Process a GLSL texturing op (will be SPV image)
+
+ const glslang::TType &imageType = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType()
+ : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType();
+ const glslang::TSampler sampler = imageType.getSampler();
+#ifdef AMD_EXTENSIONS
+ bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate())
+ ? node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16
+ : false;
+#endif
+
+ std::vector<spv::Id> arguments;
+ if (node->getAsAggregate())
+ translateArguments(*node->getAsAggregate(), arguments);
+ else
+ translateArguments(*node->getAsUnaryNode(), arguments);
+ spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+ spv::Builder::TextureParameters params = { };
+ params.sampler = arguments[0];
+
+ glslang::TCrackedTextureOp cracked;
+ node->crackTexture(sampler, cracked);
+
+ const bool isUnsignedResult = node->getType().getBasicType() == glslang::EbtUint;
+
+ // Check for queries
+ if (cracked.query) {
+        // OpImageQueryLod works on a sampled image; for the other queries the image has to be extracted first
+ if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler))
+ params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+ switch (node->getOp()) {
+ case glslang::EOpImageQuerySize:
+ case glslang::EOpTextureQuerySize:
+ if (arguments.size() > 1) {
+ params.lod = arguments[1];
+ return builder.createTextureQueryCall(spv::OpImageQuerySizeLod, params, isUnsignedResult);
+ } else
+ return builder.createTextureQueryCall(spv::OpImageQuerySize, params, isUnsignedResult);
+ case glslang::EOpImageQuerySamples:
+ case glslang::EOpTextureQuerySamples:
+ return builder.createTextureQueryCall(spv::OpImageQuerySamples, params, isUnsignedResult);
+ case glslang::EOpTextureQueryLod:
+ params.coords = arguments[1];
+ return builder.createTextureQueryCall(spv::OpImageQueryLod, params, isUnsignedResult);
+ case glslang::EOpTextureQueryLevels:
+ return builder.createTextureQueryCall(spv::OpImageQueryLevels, params, isUnsignedResult);
+ case glslang::EOpSparseTexelsResident:
+ return builder.createUnaryOp(spv::OpImageSparseTexelsResident, builder.makeBoolType(), arguments[0]);
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ int components = node->getType().getVectorSize();
+
+ if (node->getOp() == glslang::EOpTextureFetch) {
+ // These must produce 4 components, per SPIR-V spec. We'll add a conversion constructor if needed.
+ // This will only happen through the HLSL path for operator[], so we do not have to handle e.g.
+ // the EOpTexture/Proj/Lod/etc family. It would be harmless to do so, but would need more logic
+ // here around e.g. which ones return scalars or other types.
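+        // (e.g. an HLSL Texture2D<float> read through operator[] still yields a
+        // 4-component OpImageFetch result; the scalar is then produced by the
+        // conversion constructor added below.)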
+ components = 4;
+ }
+
+ glslang::TType returnType(node->getType().getBasicType(), glslang::EvqTemporary, components);
+
+ auto resultType = [&returnType,this]{ return convertGlslangToSpvType(returnType); };
+
+ // Check for image functions other than queries
+ if (node->isImage()) {
+ std::vector<spv::IdImmediate> operands;
+ auto opIt = arguments.begin();
+ spv::IdImmediate image = { true, *(opIt++) };
+ operands.push_back(image);
+
+ // Handle subpass operations
+ // TODO: GLSL should change to have the "MS" only on the type rather than the
+ // built-in function.
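+        // (A subpass input is read at the current fragment's position, so the
+        // OpImageRead coordinate is a constant (0,0) offset relative to it.)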
+ if (cracked.subpass) {
+ // add on the (0,0) coordinate
+ spv::Id zero = builder.makeIntConstant(0);
+ std::vector<spv::Id> comps;
+ comps.push_back(zero);
+ comps.push_back(zero);
+ spv::IdImmediate coord = { true,
+ builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps) };
+ operands.push_back(coord);
+ if (sampler.ms) {
+ spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
+ operands.push_back(imageOperands);
+ spv::IdImmediate imageOperand = { true, *(opIt++) };
+ operands.push_back(imageOperand);
+ }
+ spv::Id result = builder.createOp(spv::OpImageRead, resultType(), operands);
+ builder.setPrecision(result, precision);
+ return result;
+ }
+
+ spv::IdImmediate coord = { true, *(opIt++) };
+ operands.push_back(coord);
+#ifdef AMD_EXTENSIONS
+ if (node->getOp() == glslang::EOpImageLoad || node->getOp() == glslang::EOpImageLoadLod) {
+#else
+ if (node->getOp() == glslang::EOpImageLoad) {
+#endif
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (sampler.ms) {
+ mask = mask | spv::ImageOperandsSampleMask;
+ }
+#ifdef AMD_EXTENSIONS
+ if (cracked.lod) {
+ builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+ builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+ mask = mask | spv::ImageOperandsLodMask;
+ }
+#endif
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
+ if (mask) {
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+ operands.push_back(imageOperands);
+ }
+ if (mask & spv::ImageOperandsSampleMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#ifdef AMD_EXTENSIONS
+ if (mask & spv::ImageOperandsLodMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#endif
+ if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+ operands.push_back(imageOperand);
+ }
+
+ if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+ builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+
+ std::vector<spv::Id> result(1, builder.createOp(spv::OpImageRead, resultType(), operands));
+ builder.setPrecision(result[0], precision);
+
+ // If needed, add a conversion constructor to the proper size.
+ if (components != node->getType().getVectorSize())
+ result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType()));
+
+ return result[0];
+#ifdef AMD_EXTENSIONS
+ } else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) {
+#else
+ } else if (node->getOp() == glslang::EOpImageStore) {
+#endif
+
+ // Push the texel value before the operands
+#ifdef AMD_EXTENSIONS
+ if (sampler.ms || cracked.lod) {
+#else
+ if (sampler.ms) {
+#endif
+ spv::IdImmediate texel = { true, *(opIt + 1) };
+ operands.push_back(texel);
+ } else {
+ spv::IdImmediate texel = { true, *opIt };
+ operands.push_back(texel);
+ }
+
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (sampler.ms) {
+ mask = mask | spv::ImageOperandsSampleMask;
+ }
+#ifdef AMD_EXTENSIONS
+ if (cracked.lod) {
+ builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+ builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+ mask = mask | spv::ImageOperandsLodMask;
+ }
+#endif
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask);
+ if (mask) {
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+ operands.push_back(imageOperands);
+ }
+ if (mask & spv::ImageOperandsSampleMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#ifdef AMD_EXTENSIONS
+ if (mask & spv::ImageOperandsLodMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#endif
+ if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) {
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+ operands.push_back(imageOperand);
+ }
+
+ builder.createNoResultOp(spv::OpImageWrite, operands);
+ if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+ builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat);
+ return spv::NoResult;
+#ifdef AMD_EXTENSIONS
+ } else if (node->getOp() == glslang::EOpSparseImageLoad || node->getOp() == glslang::EOpSparseImageLoadLod) {
+#else
+ } else if (node->getOp() == glslang::EOpSparseImageLoad) {
+#endif
+ builder.addCapability(spv::CapabilitySparseResidency);
+ if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+ builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (sampler.ms) {
+ mask = mask | spv::ImageOperandsSampleMask;
+ }
+#ifdef AMD_EXTENSIONS
+ if (cracked.lod) {
+ builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+ builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+
+ mask = mask | spv::ImageOperandsLodMask;
+ }
+#endif
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
+ if (mask) {
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+ operands.push_back(imageOperands);
+ }
+ if (mask & spv::ImageOperandsSampleMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#ifdef AMD_EXTENSIONS
+ if (mask & spv::ImageOperandsLodMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#endif
+ if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+ operands.push_back(imageOperand);
+ }
+
+            // Create the special struct return type { residency code, texel } required by OpImageSparseRead
+ spv::Id texelOut = *opIt;
+ spv::Id typeId0 = resultType();
+ spv::Id typeId1 = builder.getDerefTypeId(texelOut);
+ spv::Id resultTypeId = builder.makeStructResultType(typeId0, typeId1);
+
+ spv::Id resultId = builder.createOp(spv::OpImageSparseRead, resultTypeId, operands);
+
+            // Decode the struct: store the texel through the out-parameter, return the residency code
+ builder.createStore(builder.createCompositeExtract(resultId, typeId1, 1), texelOut);
+ return builder.createCompositeExtract(resultId, typeId0, 0);
+ } else {
+ // Process image atomic operations
+
+ // GLSL "IMAGE_PARAMS" will involve in constructing an image texel pointer and this pointer,
+ // as the first source operand, is required by SPIR-V atomic operations.
+ // For non-MS, the sample value should be 0
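+            // (Roughly, imageAtomicAdd(image, coord, data) becomes
+            //     %ptr = OpImageTexelPointer %ptrType %image %coord %sample
+            //     %res = OpAtomicIAdd %type %ptr %scope %semantics %data
+            // with the pointer built here and the atomic created below.)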
+ spv::IdImmediate sample = { true, sampler.ms ? *(opIt++) : builder.makeUintConstant(0) };
+ operands.push_back(sample);
+
+ spv::Id resultTypeId;
+ // imageAtomicStore has a void return type so base the pointer type on
+ // the type of the value operand.
+ if (node->getOp() == glslang::EOpImageAtomicStore) {
+ resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(operands[2].word));
+ } else {
+ resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
+ }
+ spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
+
+ std::vector<spv::Id> operands;
+ operands.push_back(pointer);
+ for (; opIt != arguments.end(); ++opIt)
+ operands.push_back(*opIt);
+
+ return createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ // Check for fragment mask functions other than queries
+ if (cracked.fragMask) {
+ assert(sampler.ms);
+
+ auto opIt = arguments.begin();
+ std::vector<spv::Id> operands;
+
+ // Extract the image if necessary
+ if (builder.isSampledImage(params.sampler))
+ params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+ operands.push_back(params.sampler);
+ ++opIt;
+
+ if (sampler.isSubpass()) {
+ // add on the (0,0) coordinate
+ spv::Id zero = builder.makeIntConstant(0);
+ std::vector<spv::Id> comps;
+ comps.push_back(zero);
+ comps.push_back(zero);
+ operands.push_back(builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps));
+ }
+
+ for (; opIt != arguments.end(); ++opIt)
+ operands.push_back(*opIt);
+
+ spv::Op fragMaskOp = spv::OpNop;
+ if (node->getOp() == glslang::EOpFragmentMaskFetch)
+ fragMaskOp = spv::OpFragmentMaskFetchAMD;
+ else if (node->getOp() == glslang::EOpFragmentFetch)
+ fragMaskOp = spv::OpFragmentFetchAMD;
+
+ builder.addExtension(spv::E_SPV_AMD_shader_fragment_mask);
+ builder.addCapability(spv::CapabilityFragmentMaskAMD);
+ return builder.createOp(fragMaskOp, resultType(), operands);
+ }
+#endif
+
+ // Check for texture functions other than queries
+ bool sparse = node->isSparseTexture();
+#ifdef NV_EXTENSIONS
+ bool imageFootprint = node->isImageFootprint();
+#endif
+
+ bool cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+
+ // check for bias argument
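+    // (The optional bias is always the trailing argument when present, so it is
+    // detected by comparing the actual argument count against the largest
+    // possible count of non-bias arguments.)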
+ bool bias = false;
+#ifdef AMD_EXTENSIONS
+ if (! cracked.lod && ! cracked.grad && ! cracked.fetch && ! cubeCompare) {
+#else
+ if (! cracked.lod && ! cracked.gather && ! cracked.grad && ! cracked.fetch && ! cubeCompare) {
+#endif
+ int nonBiasArgCount = 2;
+#ifdef AMD_EXTENSIONS
+ if (cracked.gather)
+ ++nonBiasArgCount; // comp argument should be present when bias argument is present
+
+ if (f16ShadowCompare)
+ ++nonBiasArgCount;
+#endif
+ if (cracked.offset)
+ ++nonBiasArgCount;
+#ifdef AMD_EXTENSIONS
+ else if (cracked.offsets)
+ ++nonBiasArgCount;
+#endif
+ if (cracked.grad)
+ nonBiasArgCount += 2;
+ if (cracked.lodClamp)
+ ++nonBiasArgCount;
+ if (sparse)
+ ++nonBiasArgCount;
+#ifdef NV_EXTENSIONS
+ if (imageFootprint)
+            // Following three extra arguments:
+            //   int granularity, bool coarse, out gl_TextureFootprint2DNV footprint
+ nonBiasArgCount += 3;
+#endif
+ if ((int)arguments.size() > nonBiasArgCount)
+ bias = true;
+ }
+
+ // See if the sampler param should really be just the SPV image part
+ if (cracked.fetch) {
+ // a fetch needs to have the image extracted first
+ if (builder.isSampledImage(params.sampler))
+ params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (cracked.gather) {
+ const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
+ if (bias || cracked.lod ||
+ sourceExtensions.find(glslang::E_GL_AMD_texture_gather_bias_lod) != sourceExtensions.end()) {
+ builder.addExtension(spv::E_SPV_AMD_texture_gather_bias_lod);
+ builder.addCapability(spv::CapabilityImageGatherBiasLodAMD);
+ }
+ }
+#endif
+
+ // set the rest of the arguments
+
+ params.coords = arguments[1];
+ int extraArgs = 0;
+ bool noImplicitLod = false;
+
+ // sort out where Dref is coming from
+#ifdef AMD_EXTENSIONS
+ if (cubeCompare || f16ShadowCompare) {
+#else
+ if (cubeCompare) {
+#endif
+ params.Dref = arguments[2];
+ ++extraArgs;
+ } else if (sampler.shadow && cracked.gather) {
+ params.Dref = arguments[2];
+ ++extraArgs;
+ } else if (sampler.shadow) {
+ std::vector<spv::Id> indexes;
+ int dRefComp;
+ if (cracked.proj)
+ dRefComp = 2; // "The resulting 3rd component of P in the shadow forms is used as Dref"
+ else
+ dRefComp = builder.getNumComponents(params.coords) - 1;
+ indexes.push_back(dRefComp);
+ params.Dref = builder.createCompositeExtract(params.coords, builder.getScalarTypeId(builder.getTypeId(params.coords)), indexes);
+ }
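+    // (For example, texture(sampler2DShadow, vec3 P) carries Dref in the last
+    // component P.z, while textureProj(sampler2DShadow, vec4 P) carries it in
+    // P.z with P.w as the projective q, per the quote above.)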
+
+ // lod
+ if (cracked.lod) {
+ params.lod = arguments[2 + extraArgs];
+ ++extraArgs;
+ } else if (glslangIntermediate->getStage() != EShLangFragment
+#ifdef NV_EXTENSIONS
+ // NV_compute_shader_derivatives layout qualifiers allow for implicit LODs
+ && !(glslangIntermediate->getStage() == EShLangCompute &&
+ (glslangIntermediate->getLayoutDerivativeModeNone() != glslang::LayoutDerivativeNone))
+#endif
+ ) {
+ // we need to invent the default lod for an explicit lod instruction for a non-fragment stage
+ noImplicitLod = true;
+ }
+
+ // multisample
+ if (sampler.ms) {
+ params.sample = arguments[2 + extraArgs]; // For MS, "sample" should be specified
+ ++extraArgs;
+ }
+
+ // gradient
+ if (cracked.grad) {
+ params.gradX = arguments[2 + extraArgs];
+ params.gradY = arguments[3 + extraArgs];
+ extraArgs += 2;
+ }
+
+ // offset and offsets
+ if (cracked.offset) {
+ params.offset = arguments[2 + extraArgs];
+ ++extraArgs;
+ } else if (cracked.offsets) {
+ params.offsets = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+
+ // lod clamp
+ if (cracked.lodClamp) {
+ params.lodClamp = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+ // sparse
+ if (sparse) {
+ params.texelOut = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+
+ // gather component
+ if (cracked.gather && ! sampler.shadow) {
+        // the component is an explicit argument if present, otherwise it defaults to 0
+ if (2 + extraArgs < (int)arguments.size()) {
+ params.component = arguments[2 + extraArgs];
+ ++extraArgs;
+ } else
+ params.component = builder.makeIntConstant(0);
+ }
+#ifdef NV_EXTENSIONS
+ spv::Id resultStruct = spv::NoResult;
+ if (imageFootprint) {
+        // Following three extra arguments:
+        //   int granularity, bool coarse, out gl_TextureFootprint2DNV footprint
+ params.granularity = arguments[2 + extraArgs];
+ params.coarse = arguments[3 + extraArgs];
+ resultStruct = arguments[4 + extraArgs];
+ extraArgs += 3;
+ }
+#endif
+ // bias
+ if (bias) {
+ params.bias = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+
+#ifdef NV_EXTENSIONS
+ if (imageFootprint) {
+ builder.addExtension(spv::E_SPV_NV_shader_image_footprint);
+ builder.addCapability(spv::CapabilityImageFootprintNV);
+
+        // resultStructType (the OpenGL-side type) contains 5 members:
+        //   struct gl_TextureFootprint2DNV {
+        //       uvec2 anchor;
+        //       uvec2 offset;
+        //       uvec2 mask;
+        //       uint  lod;
+        //       uint  granularity;
+        //   };
+        // or
+        //   struct gl_TextureFootprint3DNV {
+        //       uvec3 anchor;
+        //       uvec3 offset;
+        //       uvec2 mask;
+        //       uint  lod;
+        //       uint  granularity;
+        //   };
+ spv::Id resultStructType = builder.getContainedTypeId(builder.getTypeId(resultStruct));
+ assert(builder.isStructType(resultStructType));
+
+        // resType (the SPIR-V-side type) contains 6 members:
+        //   Member 0 must be a Boolean type scalar (LOD),
+        //   Member 1 must be a vector of integer type, whose Signedness operand is 0 (anchor),
+        //   Member 2 must be a vector of integer type, whose Signedness operand is 0 (offset),
+        //   Member 3 must be a vector of integer type, whose Signedness operand is 0 (mask),
+        //   Member 4 must be a scalar of integer type, whose Signedness operand is 0 (lod),
+        //   Member 5 must be a scalar of integer type, whose Signedness operand is 0 (granularity).
+ std::vector<spv::Id> members;
+ members.push_back(resultType());
+ for (int i = 0; i < 5; i++) {
+ members.push_back(builder.getContainedTypeId(resultStructType, i));
+ }
+ spv::Id resType = builder.makeStructType(members, "ResType");
+
+        // call the ImageFootprintNV instruction
+ spv::Id res = builder.createTextureCall(precision, resType, sparse, cracked.fetch, cracked.proj, cracked.gather, noImplicitLod, params);
+
+        // copy resType (SPIR-V type) to resultStructType (OpenGL type)
+ for (int i = 0; i < 5; i++) {
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(resultStruct);
+
+            // We are accessing a struct we created ourselves, so no coherent flags are set
+ spv::Builder::AccessChain::CoherentFlags flags;
+ flags.clear();
+
+ builder.accessChainPush(builder.makeIntConstant(i), flags, 0);
+ builder.accessChainStore(builder.createCompositeExtract(res, builder.getContainedTypeId(resType, i+1), i+1));
+ }
+ return builder.createCompositeExtract(res, resultType(), 0);
+ }
+#endif
+
+    // projective component (might need to move)
+ // GLSL: "The texture coordinates consumed from P, not including the last component of P,
+ // are divided by the last component of P."
+ // SPIR-V: "... (u [, v] [, w], q)... It may be a vector larger than needed, but all
+ // unused components will appear after all used components."
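+    // (For example, textureProj(sampler1D, vec4 P) has q in component 3, but
+    // SPIR-V consumes only (u, q), so q is copied into component 1 below.)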
+ if (cracked.proj) {
+ int projSourceComp = builder.getNumComponents(params.coords) - 1;
+ int projTargetComp;
+ switch (sampler.dim) {
+ case glslang::Esd1D: projTargetComp = 1; break;
+ case glslang::Esd2D: projTargetComp = 2; break;
+ case glslang::EsdRect: projTargetComp = 2; break;
+ default: projTargetComp = projSourceComp; break;
+ }
+ // copy the projective coordinate if we have to
+ if (projTargetComp != projSourceComp) {
+ spv::Id projComp = builder.createCompositeExtract(params.coords,
+ builder.getScalarTypeId(builder.getTypeId(params.coords)),
+ projSourceComp);
+ params.coords = builder.createCompositeInsert(projComp, params.coords,
+ builder.getTypeId(params.coords), projTargetComp);
+ }
+ }
+
+ // nonprivate
+ if (imageType.getQualifier().nonprivate) {
+ params.nonprivate = true;
+ }
+
+ // volatile
+ if (imageType.getQualifier().volatil) {
+ params.volatil = true;
+ }
+
+ std::vector<spv::Id> result( 1,
+ builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather, noImplicitLod, params)
+ );
+
+ if (components != node->getType().getVectorSize())
+ result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType()));
+
+ return result[0];
+}
+
+spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAggregate* node)
+{
+ // Grab the function's pointer from the previously created function
+ spv::Function* function = functionMap[node->getName().c_str()];
+ if (! function)
+ return 0;
+
+ const glslang::TIntermSequence& glslangArgs = node->getSequence();
+ const glslang::TQualifierList& qualifiers = node->getQualifierList();
+
+ // See comments in makeFunctions() for details about the semantics for parameter passing.
+ //
+ // These imply we need a four step process:
+ // 1. Evaluate the arguments
+ // 2. Allocate and make copies of in, out, and inout arguments
+ // 3. Make the call
+ // 4. Copy back the results
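+    //
+    // For example (illustrative GLSL): for "void f(inout float x)" called as
+    // "f(v[i])", the argument is evaluated once to an l-value, its value is
+    // copied into a fresh Function-storage "param" variable, the call passes
+    // a pointer to that variable, and on return the value is stored back to v[i].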
+
+ // 1. Evaluate the arguments and their types
+ std::vector<spv::Builder::AccessChain> lValues;
+ std::vector<spv::Id> rValues;
+ std::vector<const glslang::TType*> argTypes;
+ for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+ argTypes.push_back(&glslangArgs[a]->getAsTyped()->getType());
+ // build l-value
+ builder.clearAccessChain();
+ glslangArgs[a]->traverse(this);
+ // keep outputs and pass-by-originals as l-values, evaluate others as r-values
+ if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0) ||
+ writableParam(qualifiers[a])) {
+ // save l-value
+ lValues.push_back(builder.getAccessChain());
+ } else {
+ // process r-value
+ rValues.push_back(accessChainLoad(*argTypes.back()));
+ }
+ }
+
+ // 2. Allocate space for anything needing a copy, and if it's "in" or "inout"
+ // copy the original into that space.
+ //
+ // Also, build up the list of actual arguments to pass in for the call
+ int lValueCount = 0;
+ int rValueCount = 0;
+ std::vector<spv::Id> spvArgs;
+ for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+ spv::Id arg;
+ if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0)) {
+ builder.setAccessChain(lValues[lValueCount]);
+ arg = builder.accessChainGetLValue();
+ ++lValueCount;
+ } else if (writableParam(qualifiers[a])) {
+ // need space to hold the copy
+ arg = builder.createVariable(spv::StorageClassFunction, builder.getContainedTypeId(function->getParamType(a)), "param");
+ if (qualifiers[a] == glslang::EvqIn || qualifiers[a] == glslang::EvqInOut) {
+ // need to copy the input into output space
+ builder.setAccessChain(lValues[lValueCount]);
+ spv::Id copy = accessChainLoad(*argTypes[a]);
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(arg);
+ multiTypeStore(*argTypes[a], copy);
+ }
+ ++lValueCount;
+ } else {
+ // process r-value, which involves a copy for a type mismatch
+ if (function->getParamType(a) != convertGlslangToSpvType(*argTypes[a])) {
+ spv::Id argCopy = builder.createVariable(spv::StorageClassFunction, function->getParamType(a), "arg");
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(argCopy);
+ multiTypeStore(*argTypes[a], rValues[rValueCount]);
+ arg = builder.createLoad(argCopy);
+ } else
+ arg = rValues[rValueCount];
+ ++rValueCount;
+ }
+ spvArgs.push_back(arg);
+ }
+
+ // 3. Make the call.
+ spv::Id result = builder.createFunctionCall(function, spvArgs);
+ builder.setPrecision(result, TranslatePrecisionDecoration(node->getType()));
+
+ // 4. Copy back out an "out" arguments.
+ lValueCount = 0;
+ for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+ if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0))
+ ++lValueCount;
+ else if (writableParam(qualifiers[a])) {
+ if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) {
+ spv::Id copy = builder.createLoad(spvArgs[a]);
+ builder.setAccessChain(lValues[lValueCount]);
+ multiTypeStore(*argTypes[a], copy);
+ }
+ ++lValueCount;
+ }
+ }
+
+ return result;
+}
+
+// Translate AST operation to SPV operation, already having SPV-based operands/types.
+spv::Id TGlslangToSpvTraverser::createBinaryOperation(glslang::TOperator op, OpDecorations& decorations,
+ spv::Id typeId, spv::Id left, spv::Id right,
+ glslang::TBasicType typeProxy, bool reduceComparison)
+{
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+ bool isBool = typeProxy == glslang::EbtBool;
+
+ spv::Op binOp = spv::OpNop;
+ bool needMatchingVectors = true; // for non-matrix ops, would a scalar need to smear to match a vector?
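+    // (e.g. for "vec3 + float" the scalar is smeared to a vec3 before OpFAdd,
+    // whereas OpVectorTimesScalar consumes the mixed vector/scalar shape as is)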
+ bool comparison = false;
+
+ switch (op) {
+ case glslang::EOpAdd:
+ case glslang::EOpAddAssign:
+ if (isFloat)
+ binOp = spv::OpFAdd;
+ else
+ binOp = spv::OpIAdd;
+ break;
+ case glslang::EOpSub:
+ case glslang::EOpSubAssign:
+ if (isFloat)
+ binOp = spv::OpFSub;
+ else
+ binOp = spv::OpISub;
+ break;
+ case glslang::EOpMul:
+ case glslang::EOpMulAssign:
+ if (isFloat)
+ binOp = spv::OpFMul;
+ else
+ binOp = spv::OpIMul;
+ break;
+ case glslang::EOpVectorTimesScalar:
+ case glslang::EOpVectorTimesScalarAssign:
+ if (isFloat && (builder.isVector(left) || builder.isVector(right))) {
+ if (builder.isVector(right))
+ std::swap(left, right);
+ assert(builder.isScalar(right));
+ needMatchingVectors = false;
+ binOp = spv::OpVectorTimesScalar;
+ } else if (isFloat)
+ binOp = spv::OpFMul;
+ else
+ binOp = spv::OpIMul;
+ break;
+ case glslang::EOpVectorTimesMatrix:
+ case glslang::EOpVectorTimesMatrixAssign:
+ binOp = spv::OpVectorTimesMatrix;
+ break;
+ case glslang::EOpMatrixTimesVector:
+ binOp = spv::OpMatrixTimesVector;
+ break;
+ case glslang::EOpMatrixTimesScalar:
+ case glslang::EOpMatrixTimesScalarAssign:
+ binOp = spv::OpMatrixTimesScalar;
+ break;
+ case glslang::EOpMatrixTimesMatrix:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ binOp = spv::OpMatrixTimesMatrix;
+ break;
+ case glslang::EOpOuterProduct:
+ binOp = spv::OpOuterProduct;
+ needMatchingVectors = false;
+ break;
+
+ case glslang::EOpDiv:
+ case glslang::EOpDivAssign:
+ if (isFloat)
+ binOp = spv::OpFDiv;
+ else if (isUnsigned)
+ binOp = spv::OpUDiv;
+ else
+ binOp = spv::OpSDiv;
+ break;
+ case glslang::EOpMod:
+ case glslang::EOpModAssign:
+ if (isFloat)
+ binOp = spv::OpFMod;
+ else if (isUnsigned)
+ binOp = spv::OpUMod;
+ else
+ binOp = spv::OpSMod;
+ break;
+ case glslang::EOpRightShift:
+ case glslang::EOpRightShiftAssign:
+ if (isUnsigned)
+ binOp = spv::OpShiftRightLogical;
+ else
+ binOp = spv::OpShiftRightArithmetic;
+ break;
+ case glslang::EOpLeftShift:
+ case glslang::EOpLeftShiftAssign:
+ binOp = spv::OpShiftLeftLogical;
+ break;
+ case glslang::EOpAnd:
+ case glslang::EOpAndAssign:
+ binOp = spv::OpBitwiseAnd;
+ break;
+ case glslang::EOpLogicalAnd:
+ needMatchingVectors = false;
+ binOp = spv::OpLogicalAnd;
+ break;
+ case glslang::EOpInclusiveOr:
+ case glslang::EOpInclusiveOrAssign:
+ binOp = spv::OpBitwiseOr;
+ break;
+ case glslang::EOpLogicalOr:
+ needMatchingVectors = false;
+ binOp = spv::OpLogicalOr;
+ break;
+ case glslang::EOpExclusiveOr:
+ case glslang::EOpExclusiveOrAssign:
+ binOp = spv::OpBitwiseXor;
+ break;
+ case glslang::EOpLogicalXor:
+ needMatchingVectors = false;
+ binOp = spv::OpLogicalNotEqual;
+ break;
+
+ case glslang::EOpLessThan:
+ case glslang::EOpGreaterThan:
+ case glslang::EOpLessThanEqual:
+ case glslang::EOpGreaterThanEqual:
+ case glslang::EOpEqual:
+ case glslang::EOpNotEqual:
+ case glslang::EOpVectorEqual:
+ case glslang::EOpVectorNotEqual:
+ comparison = true;
+ break;
+ default:
+ break;
+ }
+
+ // handle mapped binary operations (should be non-comparison)
+ if (binOp != spv::OpNop) {
+ assert(comparison == false);
+ if (builder.isMatrix(left) || builder.isMatrix(right) ||
+ builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right))
+ return createBinaryMatrixOperation(binOp, decorations, typeId, left, right);
+
+        // No matrix involved; make both operands the same number of components, if needed
+ if (needMatchingVectors)
+ builder.promoteScalar(decorations.precision, left, right);
+
+ spv::Id result = builder.createBinOp(binOp, typeId, left, right);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ return builder.setPrecision(result, decorations.precision);
+ }
+
+ if (! comparison)
+ return 0;
+
+ // Handle comparison instructions
+
+ if (reduceComparison && (op == glslang::EOpEqual || op == glslang::EOpNotEqual)
+ && (builder.isVector(left) || builder.isMatrix(left) || builder.isAggregate(left))) {
+ spv::Id result = builder.createCompositeCompare(decorations.precision, left, right, op == glslang::EOpEqual);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+ }
+
+ switch (op) {
+ case glslang::EOpLessThan:
+ if (isFloat)
+ binOp = spv::OpFOrdLessThan;
+ else if (isUnsigned)
+ binOp = spv::OpULessThan;
+ else
+ binOp = spv::OpSLessThan;
+ break;
+ case glslang::EOpGreaterThan:
+ if (isFloat)
+ binOp = spv::OpFOrdGreaterThan;
+ else if (isUnsigned)
+ binOp = spv::OpUGreaterThan;
+ else
+ binOp = spv::OpSGreaterThan;
+ break;
+ case glslang::EOpLessThanEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdLessThanEqual;
+ else if (isUnsigned)
+ binOp = spv::OpULessThanEqual;
+ else
+ binOp = spv::OpSLessThanEqual;
+ break;
+ case glslang::EOpGreaterThanEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdGreaterThanEqual;
+ else if (isUnsigned)
+ binOp = spv::OpUGreaterThanEqual;
+ else
+ binOp = spv::OpSGreaterThanEqual;
+ break;
+ case glslang::EOpEqual:
+ case glslang::EOpVectorEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdEqual;
+ else if (isBool)
+ binOp = spv::OpLogicalEqual;
+ else
+ binOp = spv::OpIEqual;
+ break;
+ case glslang::EOpNotEqual:
+ case glslang::EOpVectorNotEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdNotEqual;
+ else if (isBool)
+ binOp = spv::OpLogicalNotEqual;
+ else
+ binOp = spv::OpINotEqual;
+ break;
+ default:
+ break;
+ }
+
+ if (binOp != spv::OpNop) {
+ spv::Id result = builder.createBinOp(binOp, typeId, left, right);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ return builder.setPrecision(result, decorations.precision);
+ }
+
+ return 0;
+}
+
+//
+// Translate AST matrix operation to SPV operation, already having SPV-based operands/types.
+// These can be any of:
+//
+// matrix * scalar
+// scalar * matrix
+// matrix * matrix linear algebraic
+// matrix * vector
+// vector * matrix
+// matrix * matrix componentwise
+// matrix op matrix op in {+, -, /}
+// matrix op scalar op in {+, -, /}
+// scalar op matrix op in {+, -, /}
+//
+spv::Id TGlslangToSpvTraverser::createBinaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId,
+ spv::Id left, spv::Id right)
+{
+ bool firstClass = true;
+
+ // First, handle first-class matrix operations (* and matrix/scalar)
+ switch (op) {
+ case spv::OpFDiv:
+ if (builder.isMatrix(left) && builder.isScalar(right)) {
+ // turn matrix / scalar into a multiply...
+ spv::Id resultType = builder.getTypeId(right);
+ right = builder.createBinOp(spv::OpFDiv, resultType, builder.makeFpConstant(resultType, 1.0), right);
+ op = spv::OpMatrixTimesScalar;
+ } else
+ firstClass = false;
+ break;
+ case spv::OpMatrixTimesScalar:
+ if (builder.isMatrix(right) || builder.isCooperativeMatrix(right))
+ std::swap(left, right);
+ assert(builder.isScalar(right));
+ break;
+ case spv::OpVectorTimesMatrix:
+ assert(builder.isVector(left));
+ assert(builder.isMatrix(right));
+ break;
+ case spv::OpMatrixTimesVector:
+ assert(builder.isMatrix(left));
+ assert(builder.isVector(right));
+ break;
+ case spv::OpMatrixTimesMatrix:
+ assert(builder.isMatrix(left));
+ assert(builder.isMatrix(right));
+ break;
+ default:
+ firstClass = false;
+ break;
+ }
+
+ if (builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right))
+ firstClass = true;
+
+ if (firstClass) {
+ spv::Id result = builder.createBinOp(op, typeId, left, right);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ return builder.setPrecision(result, decorations.precision);
+ }
+
+ // Handle component-wise +, -, *, %, and / for all combinations of type.
+    // The result type of all of them is the same as the matrix operand's type.
+    // The algorithm is to:
+    // - break the matrix(es) into vectors
+    // - smear any scalar to a vector
+    // - do vector operations
+    // - make a matrix out of the vector results
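+    // (e.g. for "mat3 + float" the float is smeared to a vec3 once, the add is
+    // applied to each of the three columns, and the sums are recomposed.)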
+ switch (op) {
+ case spv::OpFAdd:
+ case spv::OpFSub:
+ case spv::OpFDiv:
+ case spv::OpFMod:
+ case spv::OpFMul:
+ {
+        // one-time setup...
+ bool leftMat = builder.isMatrix(left);
+ bool rightMat = builder.isMatrix(right);
+ unsigned int numCols = leftMat ? builder.getNumColumns(left) : builder.getNumColumns(right);
+ int numRows = leftMat ? builder.getNumRows(left) : builder.getNumRows(right);
+ spv::Id scalarType = builder.getScalarTypeId(typeId);
+ spv::Id vecType = builder.makeVectorType(scalarType, numRows);
+ std::vector<spv::Id> results;
+ spv::Id smearVec = spv::NoResult;
+ if (builder.isScalar(left))
+ smearVec = builder.smearScalar(decorations.precision, left, vecType);
+ else if (builder.isScalar(right))
+ smearVec = builder.smearScalar(decorations.precision, right, vecType);
+
+ // do each vector op
+ for (unsigned int c = 0; c < numCols; ++c) {
+ std::vector<unsigned int> indexes;
+ indexes.push_back(c);
+ spv::Id leftVec = leftMat ? builder.createCompositeExtract( left, vecType, indexes) : smearVec;
+ spv::Id rightVec = rightMat ? builder.createCompositeExtract(right, vecType, indexes) : smearVec;
+ spv::Id result = builder.createBinOp(op, vecType, leftVec, rightVec);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ results.push_back(builder.setPrecision(result, decorations.precision));
+ }
+
+ // put the pieces together
+ spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+ }
+ default:
+ assert(0);
+ return spv::NoResult;
+ }
+}
+
+spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, OpDecorations& decorations, spv::Id typeId,
+ spv::Id operand, glslang::TBasicType typeProxy)
+{
+ spv::Op unaryOp = spv::OpNop;
+ int extBuiltins = -1;
+ int libCall = -1;
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+
+ switch (op) {
+ case glslang::EOpNegative:
+ if (isFloat) {
+ unaryOp = spv::OpFNegate;
+ if (builder.isMatrixType(typeId))
+ return createUnaryMatrixOperation(unaryOp, decorations, typeId, operand, typeProxy);
+ } else
+ unaryOp = spv::OpSNegate;
+ break;
+
+ case glslang::EOpLogicalNot:
+ case glslang::EOpVectorLogicalNot:
+ unaryOp = spv::OpLogicalNot;
+ break;
+ case glslang::EOpBitwiseNot:
+ unaryOp = spv::OpNot;
+ break;
+
+ case glslang::EOpDeterminant:
+ libCall = spv::GLSLstd450Determinant;
+ break;
+ case glslang::EOpMatrixInverse:
+ libCall = spv::GLSLstd450MatrixInverse;
+ break;
+ case glslang::EOpTranspose:
+ unaryOp = spv::OpTranspose;
+ break;
+
+ case glslang::EOpRadians:
+ libCall = spv::GLSLstd450Radians;
+ break;
+ case glslang::EOpDegrees:
+ libCall = spv::GLSLstd450Degrees;
+ break;
+ case glslang::EOpSin:
+ libCall = spv::GLSLstd450Sin;
+ break;
+ case glslang::EOpCos:
+ libCall = spv::GLSLstd450Cos;
+ break;
+ case glslang::EOpTan:
+ libCall = spv::GLSLstd450Tan;
+ break;
+ case glslang::EOpAcos:
+ libCall = spv::GLSLstd450Acos;
+ break;
+ case glslang::EOpAsin:
+ libCall = spv::GLSLstd450Asin;
+ break;
+ case glslang::EOpAtan:
+ libCall = spv::GLSLstd450Atan;
+ break;
+
+ case glslang::EOpAcosh:
+ libCall = spv::GLSLstd450Acosh;
+ break;
+ case glslang::EOpAsinh:
+ libCall = spv::GLSLstd450Asinh;
+ break;
+ case glslang::EOpAtanh:
+ libCall = spv::GLSLstd450Atanh;
+ break;
+ case glslang::EOpTanh:
+ libCall = spv::GLSLstd450Tanh;
+ break;
+ case glslang::EOpCosh:
+ libCall = spv::GLSLstd450Cosh;
+ break;
+ case glslang::EOpSinh:
+ libCall = spv::GLSLstd450Sinh;
+ break;
+
+ case glslang::EOpLength:
+ libCall = spv::GLSLstd450Length;
+ break;
+ case glslang::EOpNormalize:
+ libCall = spv::GLSLstd450Normalize;
+ break;
+
+ case glslang::EOpExp:
+ libCall = spv::GLSLstd450Exp;
+ break;
+ case glslang::EOpLog:
+ libCall = spv::GLSLstd450Log;
+ break;
+ case glslang::EOpExp2:
+ libCall = spv::GLSLstd450Exp2;
+ break;
+ case glslang::EOpLog2:
+ libCall = spv::GLSLstd450Log2;
+ break;
+ case glslang::EOpSqrt:
+ libCall = spv::GLSLstd450Sqrt;
+ break;
+ case glslang::EOpInverseSqrt:
+ libCall = spv::GLSLstd450InverseSqrt;
+ break;
+
+ case glslang::EOpFloor:
+ libCall = spv::GLSLstd450Floor;
+ break;
+ case glslang::EOpTrunc:
+ libCall = spv::GLSLstd450Trunc;
+ break;
+ case glslang::EOpRound:
+ libCall = spv::GLSLstd450Round;
+ break;
+ case glslang::EOpRoundEven:
+ libCall = spv::GLSLstd450RoundEven;
+ break;
+ case glslang::EOpCeil:
+ libCall = spv::GLSLstd450Ceil;
+ break;
+ case glslang::EOpFract:
+ libCall = spv::GLSLstd450Fract;
+ break;
+
+ case glslang::EOpIsNan:
+ unaryOp = spv::OpIsNan;
+ break;
+ case glslang::EOpIsInf:
+ unaryOp = spv::OpIsInf;
+ break;
+ case glslang::EOpIsFinite:
+ unaryOp = spv::OpIsFinite;
+ break;
+
+ case glslang::EOpFloatBitsToInt:
+ case glslang::EOpFloatBitsToUint:
+ case glslang::EOpIntBitsToFloat:
+ case glslang::EOpUintBitsToFloat:
+ case glslang::EOpDoubleBitsToInt64:
+ case glslang::EOpDoubleBitsToUint64:
+ case glslang::EOpInt64BitsToDouble:
+ case glslang::EOpUint64BitsToDouble:
+ case glslang::EOpFloat16BitsToInt16:
+ case glslang::EOpFloat16BitsToUint16:
+ case glslang::EOpInt16BitsToFloat16:
+ case glslang::EOpUint16BitsToFloat16:
+ unaryOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpPackSnorm2x16:
+ libCall = spv::GLSLstd450PackSnorm2x16;
+ break;
+ case glslang::EOpUnpackSnorm2x16:
+ libCall = spv::GLSLstd450UnpackSnorm2x16;
+ break;
+ case glslang::EOpPackUnorm2x16:
+ libCall = spv::GLSLstd450PackUnorm2x16;
+ break;
+ case glslang::EOpUnpackUnorm2x16:
+ libCall = spv::GLSLstd450UnpackUnorm2x16;
+ break;
+ case glslang::EOpPackHalf2x16:
+ libCall = spv::GLSLstd450PackHalf2x16;
+ break;
+ case glslang::EOpUnpackHalf2x16:
+ libCall = spv::GLSLstd450UnpackHalf2x16;
+ break;
+ case glslang::EOpPackSnorm4x8:
+ libCall = spv::GLSLstd450PackSnorm4x8;
+ break;
+ case glslang::EOpUnpackSnorm4x8:
+ libCall = spv::GLSLstd450UnpackSnorm4x8;
+ break;
+ case glslang::EOpPackUnorm4x8:
+ libCall = spv::GLSLstd450PackUnorm4x8;
+ break;
+ case glslang::EOpUnpackUnorm4x8:
+ libCall = spv::GLSLstd450UnpackUnorm4x8;
+ break;
+ case glslang::EOpPackDouble2x32:
+ libCall = spv::GLSLstd450PackDouble2x32;
+ break;
+ case glslang::EOpUnpackDouble2x32:
+ libCall = spv::GLSLstd450UnpackDouble2x32;
+ break;
+
+ case glslang::EOpPackInt2x32:
+ case glslang::EOpUnpackInt2x32:
+ case glslang::EOpPackUint2x32:
+ case glslang::EOpUnpackUint2x32:
+ case glslang::EOpPack16:
+ case glslang::EOpPack32:
+ case glslang::EOpPack64:
+ case glslang::EOpUnpack32:
+ case glslang::EOpUnpack16:
+ case glslang::EOpUnpack8:
+ case glslang::EOpPackInt2x16:
+ case glslang::EOpUnpackInt2x16:
+ case glslang::EOpPackUint2x16:
+ case glslang::EOpUnpackUint2x16:
+ case glslang::EOpPackInt4x16:
+ case glslang::EOpUnpackInt4x16:
+ case glslang::EOpPackUint4x16:
+ case glslang::EOpUnpackUint4x16:
+ case glslang::EOpPackFloat2x16:
+ case glslang::EOpUnpackFloat2x16:
+ unaryOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpDPdx:
+ unaryOp = spv::OpDPdx;
+ break;
+ case glslang::EOpDPdy:
+ unaryOp = spv::OpDPdy;
+ break;
+ case glslang::EOpFwidth:
+ unaryOp = spv::OpFwidth;
+ break;
+ case glslang::EOpDPdxFine:
+ unaryOp = spv::OpDPdxFine;
+ break;
+ case glslang::EOpDPdyFine:
+ unaryOp = spv::OpDPdyFine;
+ break;
+ case glslang::EOpFwidthFine:
+ unaryOp = spv::OpFwidthFine;
+ break;
+ case glslang::EOpDPdxCoarse:
+ unaryOp = spv::OpDPdxCoarse;
+ break;
+ case glslang::EOpDPdyCoarse:
+ unaryOp = spv::OpDPdyCoarse;
+ break;
+ case glslang::EOpFwidthCoarse:
+ unaryOp = spv::OpFwidthCoarse;
+ break;
+ case glslang::EOpInterpolateAtCentroid:
+#ifdef AMD_EXTENSIONS
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+#endif
+ libCall = spv::GLSLstd450InterpolateAtCentroid;
+ break;
+ case glslang::EOpAny:
+ unaryOp = spv::OpAny;
+ break;
+ case glslang::EOpAll:
+ unaryOp = spv::OpAll;
+ break;
+
+ case glslang::EOpAbs:
+ if (isFloat)
+ libCall = spv::GLSLstd450FAbs;
+ else
+ libCall = spv::GLSLstd450SAbs;
+ break;
+ case glslang::EOpSign:
+ if (isFloat)
+ libCall = spv::GLSLstd450FSign;
+ else
+ libCall = spv::GLSLstd450SSign;
+ break;
+
+ case glslang::EOpAtomicCounterIncrement:
+ case glslang::EOpAtomicCounterDecrement:
+ case glslang::EOpAtomicCounter:
+ {
+ // Handle all of the atomics in one place, in createAtomicOperation()
+ std::vector<spv::Id> operands;
+ operands.push_back(operand);
+ return createAtomicOperation(op, decorations.precision, typeId, operands, typeProxy);
+ }
+
+ case glslang::EOpBitFieldReverse:
+ unaryOp = spv::OpBitReverse;
+ break;
+ case glslang::EOpBitCount:
+ unaryOp = spv::OpBitCount;
+ break;
+ case glslang::EOpFindLSB:
+ libCall = spv::GLSLstd450FindILsb;
+ break;
+ case glslang::EOpFindMSB:
+ if (isUnsigned)
+ libCall = spv::GLSLstd450FindUMsb;
+ else
+ libCall = spv::GLSLstd450FindSMsb;
+ break;
+
+ case glslang::EOpBallot:
+ case glslang::EOpReadFirstInvocation:
+ case glslang::EOpAnyInvocation:
+ case glslang::EOpAllInvocations:
+ case glslang::EOpAllInvocationsEqual:
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpMinInvocations:
+ case glslang::EOpMaxInvocations:
+ case glslang::EOpAddInvocations:
+ case glslang::EOpMinInvocationsNonUniform:
+ case glslang::EOpMaxInvocationsNonUniform:
+ case glslang::EOpAddInvocationsNonUniform:
+ case glslang::EOpMinInvocationsInclusiveScan:
+ case glslang::EOpMaxInvocationsInclusiveScan:
+ case glslang::EOpAddInvocationsInclusiveScan:
+ case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMinInvocationsExclusiveScan:
+ case glslang::EOpMaxInvocationsExclusiveScan:
+ case glslang::EOpAddInvocationsExclusiveScan:
+ case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+#endif
+ {
+ std::vector<spv::Id> operands;
+ operands.push_back(operand);
+ return createInvocationsOperation(op, typeId, operands, typeProxy);
+ }
+ case glslang::EOpSubgroupAll:
+ case glslang::EOpSubgroupAny:
+ case glslang::EOpSubgroupAllEqual:
+ case glslang::EOpSubgroupBroadcastFirst:
+ case glslang::EOpSubgroupBallot:
+ case glslang::EOpSubgroupInverseBallot:
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupBallotExclusiveBitCount:
+ case glslang::EOpSubgroupBallotFindLSB:
+ case glslang::EOpSubgroupBallotFindMSB:
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupXor:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupInclusiveXor:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupExclusiveXor:
+ case glslang::EOpSubgroupQuadSwapHorizontal:
+ case glslang::EOpSubgroupQuadSwapVertical:
+ case glslang::EOpSubgroupQuadSwapDiagonal: {
+ std::vector<spv::Id> operands;
+ operands.push_back(operand);
+ return createSubgroupOperation(op, typeId, operands, typeProxy);
+ }
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpMbcnt:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::MbcntAMD;
+ break;
+
+ case glslang::EOpCubeFaceIndex:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+ libCall = spv::CubeFaceIndexAMD;
+ break;
+
+ case glslang::EOpCubeFaceCoord:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+ libCall = spv::CubeFaceCoordAMD;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartition:
+ unaryOp = spv::OpGroupNonUniformPartitionNV;
+ break;
+#endif
+ case glslang::EOpConstructReference:
+ unaryOp = spv::OpBitcast;
+ break;
+ default:
+ return 0;
+ }
+
+ spv::Id id;
+ if (libCall >= 0) {
+ std::vector<spv::Id> args;
+ args.push_back(operand);
+ id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, args);
+ } else {
+ id = builder.createUnaryOp(unaryOp, typeId, operand);
+ }
+
+ builder.addDecoration(id, decorations.noContraction);
+ builder.addDecoration(id, decorations.nonUniform);
+ return builder.setPrecision(id, decorations.precision);
+}
+
+// Create a unary operation on a matrix
+spv::Id TGlslangToSpvTraverser::createUnaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId,
+ spv::Id operand, glslang::TBasicType /* typeProxy */)
+{
+ // Handle unary operations vector by vector.
+    // The result type is the same as the operand's type.
+    // The algorithm is to:
+    // - break the matrix into vectors
+    // - apply the operation to each vector
+    // - make a matrix out of the vector results
+
+ // get the types sorted out
+ int numCols = builder.getNumColumns(operand);
+ int numRows = builder.getNumRows(operand);
+ spv::Id srcVecType = builder.makeVectorType(builder.getScalarTypeId(builder.getTypeId(operand)), numRows);
+ spv::Id destVecType = builder.makeVectorType(builder.getScalarTypeId(typeId), numRows);
+ std::vector<spv::Id> results;
+
+ // do each vector op
+ for (int c = 0; c < numCols; ++c) {
+ std::vector<unsigned int> indexes;
+ indexes.push_back(c);
+ spv::Id srcVec = builder.createCompositeExtract(operand, srcVecType, indexes);
+ spv::Id destVec = builder.createUnaryOp(op, destVecType, srcVec);
+ builder.addDecoration(destVec, decorations.noContraction);
+ builder.addDecoration(destVec, decorations.nonUniform);
+ results.push_back(builder.setPrecision(destVec, decorations.precision));
+ }
+
+ // put the pieces together
+ spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+}
+
+// For integer conversions where both the bit width and the signedness could
+// change; only the width change is done here. The caller is still responsible
+// for the signedness conversion.
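+// (e.g. EOpConvIntToUint8 emits OpSConvert to an 8-bit signed integer here,
+// keeping the source's signedness; the caller then handles the switch to uint8.)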
+spv::Id TGlslangToSpvTraverser::createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize)
+{
+ // Get the result type width, based on the type to convert to.
+ int width = 32;
+ switch(op) {
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvUint16ToInt8:
+ case glslang::EOpConvUintToInt8:
+ case glslang::EOpConvUint64ToInt8:
+ width = 8;
+ break;
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvUint8ToInt16:
+ case glslang::EOpConvUintToInt16:
+ case glslang::EOpConvUint64ToInt16:
+ width = 16;
+ break;
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt64ToUint:
+ case glslang::EOpConvUint8ToInt:
+ case glslang::EOpConvUint16ToInt:
+ case glslang::EOpConvUint64ToInt:
+ width = 32;
+ break;
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvUint8ToInt64:
+ case glslang::EOpConvUint16ToInt64:
+ case glslang::EOpConvUintToInt64:
+ width = 64;
+ break;
+
+ default:
+ assert(false && "Default missing");
+ break;
+ }
+
+    // Get the conversion operation and result type, based on the
+    // target width but the source signedness.
+ spv::Id type = spv::NoType;
+ spv::Op convOp = spv::OpNop;
+ switch(op) {
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvInt64ToUint:
+ convOp = spv::OpSConvert;
+ type = builder.makeIntType(width);
+ break;
+ default:
+ convOp = spv::OpUConvert;
+ type = builder.makeUintType(width);
+ break;
+ }
+
+ if (vectorSize > 0)
+ type = builder.makeVectorType(type, vectorSize);
+
+ return builder.createUnaryOp(convOp, type, operand);
+}
+
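+// Convert 'operand' to 'destType' per the glslang conversion operator 'op'.
+// Bool sources become an OpSelect between smeared zero/one constants, bool
+// destinations a not-equal-to-zero comparison, and the rest map to SPIR-V
+// conversion opcodes. Returns 0 when 'op' has no conversion mapping here.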
+spv::Id TGlslangToSpvTraverser::createConversion(glslang::TOperator op, OpDecorations& decorations, spv::Id destType,
+ spv::Id operand, glslang::TBasicType typeProxy)
+{
+ spv::Op convOp = spv::OpNop;
+ spv::Id zero = 0;
+ spv::Id one = 0;
+
+ int vectorSize = builder.isVectorType(destType) ? builder.getNumTypeComponents(destType) : 0;
+
+ switch (op) {
+ case glslang::EOpConvInt8ToBool:
+ case glslang::EOpConvUint8ToBool:
+ zero = builder.makeUint8Constant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+ case glslang::EOpConvInt16ToBool:
+ case glslang::EOpConvUint16ToBool:
+ zero = builder.makeUint16Constant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+ case glslang::EOpConvIntToBool:
+ case glslang::EOpConvUintToBool:
+ zero = builder.makeUintConstant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+ case glslang::EOpConvInt64ToBool:
+ case glslang::EOpConvUint64ToBool:
+ zero = builder.makeUint64Constant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+
+ case glslang::EOpConvFloatToBool:
+ zero = builder.makeFloatConstant(0.0F);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+ case glslang::EOpConvDoubleToBool:
+ zero = builder.makeDoubleConstant(0.0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+ case glslang::EOpConvFloat16ToBool:
+ zero = builder.makeFloat16Constant(0.0F);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+ case glslang::EOpConvBoolToFloat:
+ convOp = spv::OpSelect;
+ zero = builder.makeFloatConstant(0.0F);
+ one = builder.makeFloatConstant(1.0F);
+ break;
+
+ case glslang::EOpConvBoolToDouble:
+ convOp = spv::OpSelect;
+ zero = builder.makeDoubleConstant(0.0);
+ one = builder.makeDoubleConstant(1.0);
+ break;
+
+ case glslang::EOpConvBoolToFloat16:
+ convOp = spv::OpSelect;
+ zero = builder.makeFloat16Constant(0.0F);
+ one = builder.makeFloat16Constant(1.0F);
+ break;
+
+ case glslang::EOpConvBoolToInt8:
+ zero = builder.makeInt8Constant(0);
+ one = builder.makeInt8Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToUint8:
+ zero = builder.makeUint8Constant(0);
+ one = builder.makeUint8Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToInt16:
+ zero = builder.makeInt16Constant(0);
+ one = builder.makeInt16Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToUint16:
+ zero = builder.makeUint16Constant(0);
+ one = builder.makeUint16Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToInt:
+ case glslang::EOpConvBoolToInt64:
+ if (op == glslang::EOpConvBoolToInt64)
+ zero = builder.makeInt64Constant(0);
+ else
+ zero = builder.makeIntConstant(0);
+
+ if (op == glslang::EOpConvBoolToInt64)
+ one = builder.makeInt64Constant(1);
+ else
+ one = builder.makeIntConstant(1);
+
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToUint:
+ case glslang::EOpConvBoolToUint64:
+ if (op == glslang::EOpConvBoolToUint64)
+ zero = builder.makeUint64Constant(0);
+ else
+ zero = builder.makeUintConstant(0);
+
+ if (op == glslang::EOpConvBoolToUint64)
+ one = builder.makeUint64Constant(1);
+ else
+ one = builder.makeUintConstant(1);
+
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvInt8ToFloat16:
+ case glslang::EOpConvInt8ToFloat:
+ case glslang::EOpConvInt8ToDouble:
+ case glslang::EOpConvInt16ToFloat16:
+ case glslang::EOpConvInt16ToFloat:
+ case glslang::EOpConvInt16ToDouble:
+ case glslang::EOpConvIntToFloat16:
+ case glslang::EOpConvIntToFloat:
+ case glslang::EOpConvIntToDouble:
+ case glslang::EOpConvInt64ToFloat:
+ case glslang::EOpConvInt64ToDouble:
+ case glslang::EOpConvInt64ToFloat16:
+ convOp = spv::OpConvertSToF;
+ break;
+
+ case glslang::EOpConvUint8ToFloat16:
+ case glslang::EOpConvUint8ToFloat:
+ case glslang::EOpConvUint8ToDouble:
+ case glslang::EOpConvUint16ToFloat16:
+ case glslang::EOpConvUint16ToFloat:
+ case glslang::EOpConvUint16ToDouble:
+ case glslang::EOpConvUintToFloat16:
+ case glslang::EOpConvUintToFloat:
+ case glslang::EOpConvUintToDouble:
+ case glslang::EOpConvUint64ToFloat:
+ case glslang::EOpConvUint64ToDouble:
+ case glslang::EOpConvUint64ToFloat16:
+ convOp = spv::OpConvertUToF;
+ break;
+
+ case glslang::EOpConvDoubleToFloat:
+ case glslang::EOpConvFloatToDouble:
+ case glslang::EOpConvDoubleToFloat16:
+ case glslang::EOpConvFloat16ToDouble:
+ case glslang::EOpConvFloatToFloat16:
+ case glslang::EOpConvFloat16ToFloat:
+ convOp = spv::OpFConvert;
+ if (builder.isMatrixType(destType))
+ return createUnaryMatrixOperation(convOp, decorations, destType, operand, typeProxy);
+ break;
+
+ case glslang::EOpConvFloat16ToInt8:
+ case glslang::EOpConvFloatToInt8:
+ case glslang::EOpConvDoubleToInt8:
+ case glslang::EOpConvFloat16ToInt16:
+ case glslang::EOpConvFloatToInt16:
+ case glslang::EOpConvDoubleToInt16:
+ case glslang::EOpConvFloat16ToInt:
+ case glslang::EOpConvFloatToInt:
+ case glslang::EOpConvDoubleToInt:
+ case glslang::EOpConvFloat16ToInt64:
+ case glslang::EOpConvFloatToInt64:
+ case glslang::EOpConvDoubleToInt64:
+ convOp = spv::OpConvertFToS;
+ break;
+
+ case glslang::EOpConvUint8ToInt8:
+ case glslang::EOpConvInt8ToUint8:
+ case glslang::EOpConvUint16ToInt16:
+ case glslang::EOpConvInt16ToUint16:
+ case glslang::EOpConvUintToInt:
+ case glslang::EOpConvIntToUint:
+ case glslang::EOpConvUint64ToInt64:
+ case glslang::EOpConvInt64ToUint64:
+ if (builder.isInSpecConstCodeGenMode()) {
+ // Build zero scalar or vector for OpIAdd.
+            if (op == glslang::EOpConvUint8ToInt8 || op == glslang::EOpConvInt8ToUint8) {
+ zero = builder.makeUint8Constant(0);
+ } else if (op == glslang::EOpConvUint16ToInt16 || op == glslang::EOpConvInt16ToUint16) {
+ zero = builder.makeUint16Constant(0);
+ } else if (op == glslang::EOpConvUint64ToInt64 || op == glslang::EOpConvInt64ToUint64) {
+ zero = builder.makeUint64Constant(0);
+ } else {
+ zero = builder.makeUintConstant(0);
+ }
+ zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd instead of OpBitcast to do the conversion when
+            // generating an OpSpecConstantOp instruction.
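+            // (OpBitcast is not on the list of opcodes allowed inside an
+            // OpSpecConstantOp for shaders, while OpIAdd is; adding zero
+            // re-types the value, since IAdd ignores signedness and the
+            // result type carries the new sign.)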
+ return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+ }
+ // For normal run-time conversion instruction, use OpBitcast.
+ convOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpConvFloat16ToUint8:
+ case glslang::EOpConvFloatToUint8:
+ case glslang::EOpConvDoubleToUint8:
+ case glslang::EOpConvFloat16ToUint16:
+ case glslang::EOpConvFloatToUint16:
+ case glslang::EOpConvDoubleToUint16:
+ case glslang::EOpConvFloat16ToUint:
+ case glslang::EOpConvFloatToUint:
+ case glslang::EOpConvDoubleToUint:
+ case glslang::EOpConvFloatToUint64:
+ case glslang::EOpConvDoubleToUint64:
+ case glslang::EOpConvFloat16ToUint64:
+ convOp = spv::OpConvertFToU;
+ break;
+
+ case glslang::EOpConvInt8ToInt16:
+ case glslang::EOpConvInt8ToInt:
+ case glslang::EOpConvInt8ToInt64:
+ case glslang::EOpConvInt16ToInt8:
+ case glslang::EOpConvInt16ToInt:
+ case glslang::EOpConvInt16ToInt64:
+ case glslang::EOpConvIntToInt8:
+ case glslang::EOpConvIntToInt16:
+ case glslang::EOpConvIntToInt64:
+ case glslang::EOpConvInt64ToInt8:
+ case glslang::EOpConvInt64ToInt16:
+ case glslang::EOpConvInt64ToInt:
+ convOp = spv::OpSConvert;
+ break;
+
+ case glslang::EOpConvUint8ToUint16:
+ case glslang::EOpConvUint8ToUint:
+ case glslang::EOpConvUint8ToUint64:
+ case glslang::EOpConvUint16ToUint8:
+ case glslang::EOpConvUint16ToUint:
+ case glslang::EOpConvUint16ToUint64:
+ case glslang::EOpConvUintToUint8:
+ case glslang::EOpConvUintToUint16:
+ case glslang::EOpConvUintToUint64:
+ case glslang::EOpConvUint64ToUint8:
+ case glslang::EOpConvUint64ToUint16:
+ case glslang::EOpConvUint64ToUint:
+ convOp = spv::OpUConvert;
+ break;
+
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvInt64ToUint:
+ case glslang::EOpConvUint8ToInt16:
+ case glslang::EOpConvUint8ToInt:
+ case glslang::EOpConvUint8ToInt64:
+ case glslang::EOpConvUint16ToInt8:
+ case glslang::EOpConvUint16ToInt:
+ case glslang::EOpConvUint16ToInt64:
+ case glslang::EOpConvUintToInt8:
+ case glslang::EOpConvUintToInt16:
+ case glslang::EOpConvUintToInt64:
+ case glslang::EOpConvUint64ToInt8:
+ case glslang::EOpConvUint64ToInt16:
+ case glslang::EOpConvUint64ToInt:
+        // OpSConvert/OpUConvert + OpBitcast
+ operand = createIntWidthConversion(op, operand, vectorSize);
+
+ if (builder.isInSpecConstCodeGenMode()) {
+ // Build zero scalar or vector for OpIAdd.
+            switch (op) {
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvUint16ToInt8:
+ case glslang::EOpConvUintToInt8:
+ case glslang::EOpConvUint64ToInt8:
+ zero = builder.makeUint8Constant(0);
+ break;
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvUint8ToInt16:
+ case glslang::EOpConvUintToInt16:
+ case glslang::EOpConvUint64ToInt16:
+ zero = builder.makeUint16Constant(0);
+ break;
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt64ToUint:
+ case glslang::EOpConvUint8ToInt:
+ case glslang::EOpConvUint16ToInt:
+ case glslang::EOpConvUint64ToInt:
+ zero = builder.makeUintConstant(0);
+ break;
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvUint8ToInt64:
+ case glslang::EOpConvUint16ToInt64:
+ case glslang::EOpConvUintToInt64:
+ zero = builder.makeUint64Constant(0);
+ break;
+ default:
+ assert(false && "Default missing");
+ break;
+ }
+ zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd instead of OpBitcast to do the conversion when
+            // generating an OpSpecConstantOp instruction.
+ return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+ }
+ // For normal run-time conversion instruction, use OpBitcast.
+ convOp = spv::OpBitcast;
+ break;
+ case glslang::EOpConvUint64ToPtr:
+ convOp = spv::OpConvertUToPtr;
+ break;
+ case glslang::EOpConvPtrToUint64:
+ convOp = spv::OpConvertPtrToU;
+ break;
+ default:
+ break;
+ }
+
+ spv::Id result = 0;
+ if (convOp == spv::OpNop)
+ return result;
+
+ if (convOp == spv::OpSelect) {
+ zero = makeSmearedConstant(zero, vectorSize);
+ one = makeSmearedConstant(one, vectorSize);
+ result = builder.createTriOp(convOp, destType, operand, one, zero);
+ } else
+ result = builder.createUnaryOp(convOp, destType, operand);
+
+ result = builder.setPrecision(result, decorations.precision);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+}
+
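+// Replicate a scalar constant across a vector of the given size, e.g. smearing
+// a float 0.0 to vectorSize 3 yields the composite constant (0.0, 0.0, 0.0).
+// A vectorSize of 0 means the operation is scalar and the constant passes through.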
+spv::Id TGlslangToSpvTraverser::makeSmearedConstant(spv::Id constant, int vectorSize)
+{
+ if (vectorSize == 0)
+ return constant;
+
+ spv::Id vectorTypeId = builder.makeVectorType(builder.getTypeId(constant), vectorSize);
+ std::vector<spv::Id> components;
+ for (int c = 0; c < vectorSize; ++c)
+ components.push_back(constant);
+ return builder.makeCompositeConstant(vectorTypeId, components);
+}
+
+// For glslang ops that map to SPV atomic opcodes
+spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv::Decoration /*precision*/, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+ spv::Op opCode = spv::OpNop;
+
+ switch (op) {
+ case glslang::EOpAtomicAdd:
+ case glslang::EOpImageAtomicAdd:
+ case glslang::EOpAtomicCounterAdd:
+ opCode = spv::OpAtomicIAdd;
+ break;
+ case glslang::EOpAtomicCounterSubtract:
+ opCode = spv::OpAtomicISub;
+ break;
+ case glslang::EOpAtomicMin:
+ case glslang::EOpImageAtomicMin:
+ case glslang::EOpAtomicCounterMin:
+ opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ? spv::OpAtomicUMin : spv::OpAtomicSMin;
+ break;
+ case glslang::EOpAtomicMax:
+ case glslang::EOpImageAtomicMax:
+ case glslang::EOpAtomicCounterMax:
+ opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ? spv::OpAtomicUMax : spv::OpAtomicSMax;
+ break;
+ case glslang::EOpAtomicAnd:
+ case glslang::EOpImageAtomicAnd:
+ case glslang::EOpAtomicCounterAnd:
+ opCode = spv::OpAtomicAnd;
+ break;
+ case glslang::EOpAtomicOr:
+ case glslang::EOpImageAtomicOr:
+ case glslang::EOpAtomicCounterOr:
+ opCode = spv::OpAtomicOr;
+ break;
+ case glslang::EOpAtomicXor:
+ case glslang::EOpImageAtomicXor:
+ case glslang::EOpAtomicCounterXor:
+ opCode = spv::OpAtomicXor;
+ break;
+ case glslang::EOpAtomicExchange:
+ case glslang::EOpImageAtomicExchange:
+ case glslang::EOpAtomicCounterExchange:
+ opCode = spv::OpAtomicExchange;
+ break;
+ case glslang::EOpAtomicCompSwap:
+ case glslang::EOpImageAtomicCompSwap:
+ case glslang::EOpAtomicCounterCompSwap:
+ opCode = spv::OpAtomicCompareExchange;
+ break;
+ case glslang::EOpAtomicCounterIncrement:
+ opCode = spv::OpAtomicIIncrement;
+ break;
+ case glslang::EOpAtomicCounterDecrement:
+ opCode = spv::OpAtomicIDecrement;
+ break;
+ case glslang::EOpAtomicCounter:
+ case glslang::EOpImageAtomicLoad:
+ case glslang::EOpAtomicLoad:
+ opCode = spv::OpAtomicLoad;
+ break;
+ case glslang::EOpAtomicStore:
+ case glslang::EOpImageAtomicStore:
+ opCode = spv::OpAtomicStore;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (typeProxy == glslang::EbtInt64 || typeProxy == glslang::EbtUint64)
+ builder.addCapability(spv::CapabilityInt64Atomics);
+
+ // Sort out the operands
+ // - mapping from glslang -> SPV
+ // - there are extra SPV operands that are optional in glslang
+ // - compare-exchange swaps the value and comparator
+ // - compare-exchange has an extra memory semantics
+ // - EOpAtomicCounterDecrement needs a post decrement
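+    //   e.g. GLSL atomicCompSwap(mem, compare, data) becomes, in SPIR-V operand order,
+    //   OpAtomicCompareExchange <ptr> <scope> <semEqual> <semUnequal> <data> <compare>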
+ spv::Id pointerId = 0, compareId = 0, valueId = 0;
+ // scope defaults to Device in the old model, QueueFamilyKHR in the new model
+ spv::Id scopeId;
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
+ scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR);
+ } else {
+ scopeId = builder.makeUintConstant(spv::ScopeDevice);
+ }
+ // semantics default to relaxed
+ spv::Id semanticsId = builder.makeUintConstant(spv::MemorySemanticsMaskNone);
+ spv::Id semanticsId2 = semanticsId;
+
+ pointerId = operands[0];
+ if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
+ // no additional operands
+ } else if (opCode == spv::OpAtomicCompareExchange) {
+ compareId = operands[1];
+ valueId = operands[2];
+ if (operands.size() > 3) {
+ scopeId = operands[3];
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
+ semanticsId2 = builder.makeUintConstant(builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
+ }
+ } else if (opCode == spv::OpAtomicLoad) {
+ if (operands.size() > 1) {
+ scopeId = operands[1];
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
+ }
+ } else {
+ // atomic store or RMW
+ valueId = operands[1];
+ if (operands.size() > 2) {
+ scopeId = operands[2];
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
+ }
+ }
+
+ // Check for capabilities
+ unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
+ if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+
+ if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+
+ std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
+ spvAtomicOperands.push_back(pointerId);
+ spvAtomicOperands.push_back(scopeId);
+ spvAtomicOperands.push_back(semanticsId);
+ if (opCode == spv::OpAtomicCompareExchange) {
+ spvAtomicOperands.push_back(semanticsId2);
+ spvAtomicOperands.push_back(valueId);
+ spvAtomicOperands.push_back(compareId);
+ } else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
+ spvAtomicOperands.push_back(valueId);
+ }
+
+ if (opCode == spv::OpAtomicStore) {
+ builder.createNoResultOp(opCode, spvAtomicOperands);
+ return 0;
+ } else {
+ spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
+
+        // GLSL and HLSL atomic-counter decrement return the post-decrement value,
+        // while SPIR-V returns the pre-decrement value. Translate between these semantics.
+ if (op == glslang::EOpAtomicCounterDecrement)
+ resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
+
+ return resultId;
+ }
+}
+
+// Create group invocation operations.
+spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+#ifdef AMD_EXTENSIONS
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+#endif
+
+ spv::Op opCode = spv::OpNop;
+ std::vector<spv::IdImmediate> spvGroupOperands;
+ spv::GroupOperation groupOperation = spv::GroupOperationMax;
+
+ if (op == glslang::EOpBallot || op == glslang::EOpReadFirstInvocation ||
+ op == glslang::EOpReadInvocation) {
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ } else if (op == glslang::EOpAnyInvocation ||
+ op == glslang::EOpAllInvocations ||
+ op == glslang::EOpAllInvocationsEqual) {
+ builder.addExtension(spv::E_SPV_KHR_subgroup_vote);
+ builder.addCapability(spv::CapabilitySubgroupVoteKHR);
+ } else {
+ builder.addCapability(spv::CapabilityGroups);
+#ifdef AMD_EXTENSIONS
+ if (op == glslang::EOpMinInvocationsNonUniform ||
+ op == glslang::EOpMaxInvocationsNonUniform ||
+ op == glslang::EOpAddInvocationsNonUniform ||
+ op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpAddInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMinInvocationsExclusiveScanNonUniform ||
+ op == glslang::EOpMaxInvocationsExclusiveScanNonUniform ||
+ op == glslang::EOpAddInvocationsExclusiveScanNonUniform)
+ builder.addExtension(spv::E_SPV_AMD_shader_ballot);
+#endif
+
+#ifdef AMD_EXTENSIONS
+ switch (op) {
+ case glslang::EOpMinInvocations:
+ case glslang::EOpMaxInvocations:
+ case glslang::EOpAddInvocations:
+ case glslang::EOpMinInvocationsNonUniform:
+ case glslang::EOpMaxInvocationsNonUniform:
+ case glslang::EOpAddInvocationsNonUniform:
+ groupOperation = spv::GroupOperationReduce;
+ break;
+ case glslang::EOpMinInvocationsInclusiveScan:
+ case glslang::EOpMaxInvocationsInclusiveScan:
+ case glslang::EOpAddInvocationsInclusiveScan:
+ case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+ groupOperation = spv::GroupOperationInclusiveScan;
+ break;
+ case glslang::EOpMinInvocationsExclusiveScan:
+ case glslang::EOpMaxInvocationsExclusiveScan:
+ case glslang::EOpAddInvocationsExclusiveScan:
+ case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+ groupOperation = spv::GroupOperationExclusiveScan;
+ break;
+ default:
+ break;
+ }
+ spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(scope);
+ if (groupOperation != spv::GroupOperationMax) {
+ spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+ spvGroupOperands.push_back(groupOp);
+ }
+#endif
+ }
+
+ for (auto opIt = operands.begin(); opIt != operands.end(); ++opIt) {
+ spv::IdImmediate op = { true, *opIt };
+ spvGroupOperands.push_back(op);
+ }
+
+ switch (op) {
+ case glslang::EOpAnyInvocation:
+ opCode = spv::OpSubgroupAnyKHR;
+ break;
+ case glslang::EOpAllInvocations:
+ opCode = spv::OpSubgroupAllKHR;
+ break;
+ case glslang::EOpAllInvocationsEqual:
+ opCode = spv::OpSubgroupAllEqualKHR;
+ break;
+ case glslang::EOpReadInvocation:
+ opCode = spv::OpSubgroupReadInvocationKHR;
+ if (builder.isVectorType(typeId))
+ return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+ break;
+ case glslang::EOpReadFirstInvocation:
+ opCode = spv::OpSubgroupFirstInvocationKHR;
+ break;
+ case glslang::EOpBallot:
+ {
+        // NOTE: According to the spec, the result type of "OpSubgroupBallotKHR" must be a 4-component vector of
+        // 32-bit integer type. The GLSL built-in function "ballotARB()" assumes the maximum number of invocations
+        // in a subgroup is 64. Thus, we have to convert uvec4.xy to uint64_t as follows:
+ //
+ // result = Bitcast(SubgroupBallotKHR(Predicate).xy)
+ //
+ spv::Id uintType = builder.makeUintType(32);
+ spv::Id uvec4Type = builder.makeVectorType(uintType, 4);
+ spv::Id result = builder.createOp(spv::OpSubgroupBallotKHR, uvec4Type, spvGroupOperands);
+
+ std::vector<spv::Id> components;
+ components.push_back(builder.createCompositeExtract(result, uintType, 0));
+ components.push_back(builder.createCompositeExtract(result, uintType, 1));
+
+ spv::Id uvec2Type = builder.makeVectorType(uintType, 2);
+ return builder.createUnaryOp(spv::OpBitcast, typeId,
+ builder.createCompositeConstruct(uvec2Type, components));
+ }
+
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpMinInvocations:
+ case glslang::EOpMaxInvocations:
+ case glslang::EOpAddInvocations:
+ case glslang::EOpMinInvocationsInclusiveScan:
+ case glslang::EOpMaxInvocationsInclusiveScan:
+ case glslang::EOpAddInvocationsInclusiveScan:
+ case glslang::EOpMinInvocationsExclusiveScan:
+ case glslang::EOpMaxInvocationsExclusiveScan:
+ case glslang::EOpAddInvocationsExclusiveScan:
+ if (op == glslang::EOpMinInvocations ||
+ op == glslang::EOpMinInvocationsInclusiveScan ||
+ op == glslang::EOpMinInvocationsExclusiveScan) {
+ if (isFloat)
+ opCode = spv::OpGroupFMin;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMin;
+ else
+ opCode = spv::OpGroupSMin;
+ }
+ } else if (op == glslang::EOpMaxInvocations ||
+ op == glslang::EOpMaxInvocationsInclusiveScan ||
+ op == glslang::EOpMaxInvocationsExclusiveScan) {
+ if (isFloat)
+ opCode = spv::OpGroupFMax;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMax;
+ else
+ opCode = spv::OpGroupSMax;
+ }
+ } else {
+ if (isFloat)
+ opCode = spv::OpGroupFAdd;
+ else
+ opCode = spv::OpGroupIAdd;
+ }
+
+ if (builder.isVectorType(typeId))
+ return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+ break;
+ case glslang::EOpMinInvocationsNonUniform:
+ case glslang::EOpMaxInvocationsNonUniform:
+ case glslang::EOpAddInvocationsNonUniform:
+ case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+ if (op == glslang::EOpMinInvocationsNonUniform ||
+ op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMinInvocationsExclusiveScanNonUniform) {
+ if (isFloat)
+ opCode = spv::OpGroupFMinNonUniformAMD;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMinNonUniformAMD;
+ else
+ opCode = spv::OpGroupSMinNonUniformAMD;
+ }
+ }
+ else if (op == glslang::EOpMaxInvocationsNonUniform ||
+ op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMaxInvocationsExclusiveScanNonUniform) {
+ if (isFloat)
+ opCode = spv::OpGroupFMaxNonUniformAMD;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMaxNonUniformAMD;
+ else
+ opCode = spv::OpGroupSMaxNonUniformAMD;
+ }
+ }
+ else {
+ if (isFloat)
+ opCode = spv::OpGroupFAddNonUniformAMD;
+ else
+ opCode = spv::OpGroupIAddNonUniformAMD;
+ }
+
+ if (builder.isVectorType(typeId))
+ return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+ break;
+#endif
+ default:
+ logger->missingFunctionality("invocation operation");
+ return spv::NoResult;
+ }
+
+ assert(opCode != spv::OpNop);
+ return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+// Create group invocation operations on a vector
+spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation,
+ spv::Id typeId, std::vector<spv::Id>& operands)
+{
+#ifdef AMD_EXTENSIONS
+ assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+ op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+ op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+ op == spv::OpSubgroupReadInvocationKHR ||
+ op == spv::OpGroupFMinNonUniformAMD || op == spv::OpGroupUMinNonUniformAMD || op == spv::OpGroupSMinNonUniformAMD ||
+ op == spv::OpGroupFMaxNonUniformAMD || op == spv::OpGroupUMaxNonUniformAMD || op == spv::OpGroupSMaxNonUniformAMD ||
+ op == spv::OpGroupFAddNonUniformAMD || op == spv::OpGroupIAddNonUniformAMD);
+#else
+ assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+ op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+ op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+ op == spv::OpSubgroupReadInvocationKHR);
+#endif
+
+ // Handle group invocation operations scalar by scalar.
+ // The result type is the same type as the original type.
+ // The algorithm is to:
+ // - break the vector into scalars
+ // - apply the operation to each scalar
+    //  - make a vector out of the scalar results
+
+ // get the types sorted out
+ int numComponents = builder.getNumComponents(operands[0]);
+ spv::Id scalarType = builder.getScalarTypeId(builder.getTypeId(operands[0]));
+ std::vector<spv::Id> results;
+
+ // do each scalar op
+ for (int comp = 0; comp < numComponents; ++comp) {
+ std::vector<unsigned int> indexes;
+ indexes.push_back(comp);
+ spv::IdImmediate scalar = { true, builder.createCompositeExtract(operands[0], scalarType, indexes) };
+ std::vector<spv::IdImmediate> spvGroupOperands;
+ if (op == spv::OpSubgroupReadInvocationKHR) {
+ spvGroupOperands.push_back(scalar);
+ spv::IdImmediate operand = { true, operands[1] };
+ spvGroupOperands.push_back(operand);
+ } else if (op == spv::OpGroupBroadcast) {
+ spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(scope);
+ spvGroupOperands.push_back(scalar);
+ spv::IdImmediate operand = { true, operands[1] };
+ spvGroupOperands.push_back(operand);
+ } else {
+ spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(scope);
+ spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+ spvGroupOperands.push_back(groupOp);
+ spvGroupOperands.push_back(scalar);
+ }
+
+ results.push_back(builder.createOp(op, scalarType, spvGroupOperands));
+ }
+
+ // put the pieces together
+ return builder.createCompositeConstruct(typeId, results);
+}
+
+// Create subgroup invocation operations.
+spv::Id TGlslangToSpvTraverser::createSubgroupOperation(glslang::TOperator op, spv::Id typeId,
+ std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+ // Add the required capabilities.
+ switch (op) {
+ case glslang::EOpSubgroupElect:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ break;
+ case glslang::EOpSubgroupAll:
+ case glslang::EOpSubgroupAny:
+ case glslang::EOpSubgroupAllEqual:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformVote);
+ break;
+ case glslang::EOpSubgroupBroadcast:
+ case glslang::EOpSubgroupBroadcastFirst:
+ case glslang::EOpSubgroupBallot:
+ case glslang::EOpSubgroupInverseBallot:
+ case glslang::EOpSubgroupBallotBitExtract:
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupBallotExclusiveBitCount:
+ case glslang::EOpSubgroupBallotFindLSB:
+ case glslang::EOpSubgroupBallotFindMSB:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ break;
+ case glslang::EOpSubgroupShuffle:
+ case glslang::EOpSubgroupShuffleXor:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformShuffle);
+ break;
+ case glslang::EOpSubgroupShuffleUp:
+ case glslang::EOpSubgroupShuffleDown:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformShuffleRelative);
+ break;
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupXor:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupInclusiveXor:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupExclusiveXor:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformArithmetic);
+ break;
+ case glslang::EOpSubgroupClusteredAdd:
+ case glslang::EOpSubgroupClusteredMul:
+ case glslang::EOpSubgroupClusteredMin:
+ case glslang::EOpSubgroupClusteredMax:
+ case glslang::EOpSubgroupClusteredAnd:
+ case glslang::EOpSubgroupClusteredOr:
+ case glslang::EOpSubgroupClusteredXor:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformClustered);
+ break;
+ case glslang::EOpSubgroupQuadBroadcast:
+ case glslang::EOpSubgroupQuadSwapHorizontal:
+ case glslang::EOpSubgroupQuadSwapVertical:
+ case glslang::EOpSubgroupQuadSwapDiagonal:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformQuad);
+ break;
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedXor:
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+ builder.addExtension(spv::E_SPV_NV_shader_subgroup_partitioned);
+ builder.addCapability(spv::CapabilityGroupNonUniformPartitionedNV);
+ break;
+#endif
+ default: assert(0 && "Unhandled subgroup operation!");
+ }
+
+ const bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
+ const bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble;
+ const bool isBool = typeProxy == glslang::EbtBool;
+
+ spv::Op opCode = spv::OpNop;
+
+ // Figure out which opcode to use.
+ switch (op) {
+ case glslang::EOpSubgroupElect: opCode = spv::OpGroupNonUniformElect; break;
+ case glslang::EOpSubgroupAll: opCode = spv::OpGroupNonUniformAll; break;
+ case glslang::EOpSubgroupAny: opCode = spv::OpGroupNonUniformAny; break;
+ case glslang::EOpSubgroupAllEqual: opCode = spv::OpGroupNonUniformAllEqual; break;
+ case glslang::EOpSubgroupBroadcast: opCode = spv::OpGroupNonUniformBroadcast; break;
+ case glslang::EOpSubgroupBroadcastFirst: opCode = spv::OpGroupNonUniformBroadcastFirst; break;
+ case glslang::EOpSubgroupBallot: opCode = spv::OpGroupNonUniformBallot; break;
+ case glslang::EOpSubgroupInverseBallot: opCode = spv::OpGroupNonUniformInverseBallot; break;
+ case glslang::EOpSubgroupBallotBitExtract: opCode = spv::OpGroupNonUniformBallotBitExtract; break;
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupBallotExclusiveBitCount: opCode = spv::OpGroupNonUniformBallotBitCount; break;
+ case glslang::EOpSubgroupBallotFindLSB: opCode = spv::OpGroupNonUniformBallotFindLSB; break;
+ case glslang::EOpSubgroupBallotFindMSB: opCode = spv::OpGroupNonUniformBallotFindMSB; break;
+ case glslang::EOpSubgroupShuffle: opCode = spv::OpGroupNonUniformShuffle; break;
+ case glslang::EOpSubgroupShuffleXor: opCode = spv::OpGroupNonUniformShuffleXor; break;
+ case glslang::EOpSubgroupShuffleUp: opCode = spv::OpGroupNonUniformShuffleUp; break;
+ case glslang::EOpSubgroupShuffleDown: opCode = spv::OpGroupNonUniformShuffleDown; break;
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupClusteredAdd:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFAdd;
+ } else {
+ opCode = spv::OpGroupNonUniformIAdd;
+ }
+ break;
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupClusteredMul:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFMul;
+ } else {
+ opCode = spv::OpGroupNonUniformIMul;
+ }
+ break;
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupClusteredMin:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFMin;
+ } else if (isUnsigned) {
+ opCode = spv::OpGroupNonUniformUMin;
+ } else {
+ opCode = spv::OpGroupNonUniformSMin;
+ }
+ break;
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupClusteredMax:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFMax;
+ } else if (isUnsigned) {
+ opCode = spv::OpGroupNonUniformUMax;
+ } else {
+ opCode = spv::OpGroupNonUniformSMax;
+ }
+ break;
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupClusteredAnd:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+#endif
+ if (isBool) {
+ opCode = spv::OpGroupNonUniformLogicalAnd;
+ } else {
+ opCode = spv::OpGroupNonUniformBitwiseAnd;
+ }
+ break;
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupClusteredOr:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+#endif
+ if (isBool) {
+ opCode = spv::OpGroupNonUniformLogicalOr;
+ } else {
+ opCode = spv::OpGroupNonUniformBitwiseOr;
+ }
+ break;
+ case glslang::EOpSubgroupXor:
+ case glslang::EOpSubgroupInclusiveXor:
+ case glslang::EOpSubgroupExclusiveXor:
+ case glslang::EOpSubgroupClusteredXor:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedXor:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+#endif
+ if (isBool) {
+ opCode = spv::OpGroupNonUniformLogicalXor;
+ } else {
+ opCode = spv::OpGroupNonUniformBitwiseXor;
+ }
+ break;
+ case glslang::EOpSubgroupQuadBroadcast: opCode = spv::OpGroupNonUniformQuadBroadcast; break;
+ case glslang::EOpSubgroupQuadSwapHorizontal:
+ case glslang::EOpSubgroupQuadSwapVertical:
+ case glslang::EOpSubgroupQuadSwapDiagonal: opCode = spv::OpGroupNonUniformQuadSwap; break;
+ default: assert(0 && "Unhandled subgroup operation!");
+ }
+
+ // get the right Group Operation
+ spv::GroupOperation groupOperation = spv::GroupOperationMax;
+ switch (op) {
+ default:
+ break;
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupXor:
+ groupOperation = spv::GroupOperationReduce;
+ break;
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupInclusiveXor:
+ groupOperation = spv::GroupOperationInclusiveScan;
+ break;
+ case glslang::EOpSubgroupBallotExclusiveBitCount:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupExclusiveXor:
+ groupOperation = spv::GroupOperationExclusiveScan;
+ break;
+ case glslang::EOpSubgroupClusteredAdd:
+ case glslang::EOpSubgroupClusteredMul:
+ case glslang::EOpSubgroupClusteredMin:
+ case glslang::EOpSubgroupClusteredMax:
+ case glslang::EOpSubgroupClusteredAnd:
+ case glslang::EOpSubgroupClusteredOr:
+ case glslang::EOpSubgroupClusteredXor:
+ groupOperation = spv::GroupOperationClusteredReduce;
+ break;
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedXor:
+ groupOperation = spv::GroupOperationPartitionedReduceNV;
+ break;
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ groupOperation = spv::GroupOperationPartitionedInclusiveScanNV;
+ break;
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+ groupOperation = spv::GroupOperationPartitionedExclusiveScanNV;
+ break;
+#endif
+ }
+
+ // build the instruction
+ std::vector<spv::IdImmediate> spvGroupOperands;
+
+ // Every operation begins with the Execution Scope operand.
+ spv::IdImmediate executionScope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(executionScope);
+
+ // Next, for all operations that use a Group Operation, push that as an operand.
+ if (groupOperation != spv::GroupOperationMax) {
+ spv::IdImmediate groupOperand = { false, (unsigned)groupOperation };
+ spvGroupOperands.push_back(groupOperand);
+ }
+
+ // Push back the operands next.
+ for (auto opIt = operands.cbegin(); opIt != operands.cend(); ++opIt) {
+ spv::IdImmediate operand = { true, *opIt };
+ spvGroupOperands.push_back(operand);
+ }
+
+ // Some opcodes have additional operands.
+ spv::Id directionId = spv::NoResult;
+ switch (op) {
+ default: break;
+ case glslang::EOpSubgroupQuadSwapHorizontal: directionId = builder.makeUintConstant(0); break;
+ case glslang::EOpSubgroupQuadSwapVertical: directionId = builder.makeUintConstant(1); break;
+ case glslang::EOpSubgroupQuadSwapDiagonal: directionId = builder.makeUintConstant(2); break;
+ }
+ if (directionId != spv::NoResult) {
+ spv::IdImmediate direction = { true, directionId };
+ spvGroupOperands.push_back(direction);
+ }
+
+ return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
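+// Create SPIR-V for miscellaneous multi-operand built-ins: GLSL.std.450 and
+// vendor extended-instruction-set calls, plus a handful of special-cased opcodes.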
+spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+
+ spv::Op opCode = spv::OpNop;
+ int extBuiltins = -1;
+ int libCall = -1;
+ size_t consumedOperands = operands.size();
+ spv::Id typeId0 = 0;
+ if (consumedOperands > 0)
+ typeId0 = builder.getTypeId(operands[0]);
+ spv::Id typeId1 = 0;
+ if (consumedOperands > 1)
+ typeId1 = builder.getTypeId(operands[1]);
+ spv::Id frexpIntType = 0;
+
+ switch (op) {
+ case glslang::EOpMin:
+ if (isFloat)
+ libCall = spv::GLSLstd450FMin;
+ else if (isUnsigned)
+ libCall = spv::GLSLstd450UMin;
+ else
+ libCall = spv::GLSLstd450SMin;
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpModf:
+ libCall = spv::GLSLstd450Modf;
+ break;
+ case glslang::EOpMax:
+ if (isFloat)
+ libCall = spv::GLSLstd450FMax;
+ else if (isUnsigned)
+ libCall = spv::GLSLstd450UMax;
+ else
+ libCall = spv::GLSLstd450SMax;
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpPow:
+ libCall = spv::GLSLstd450Pow;
+ break;
+ case glslang::EOpDot:
+ opCode = spv::OpDot;
+ break;
+ case glslang::EOpAtan:
+ libCall = spv::GLSLstd450Atan2;
+ break;
+
+ case glslang::EOpClamp:
+ if (isFloat)
+ libCall = spv::GLSLstd450FClamp;
+ else if (isUnsigned)
+ libCall = spv::GLSLstd450UClamp;
+ else
+ libCall = spv::GLSLstd450SClamp;
+ builder.promoteScalar(precision, operands.front(), operands[1]);
+ builder.promoteScalar(precision, operands.front(), operands[2]);
+ break;
+ case glslang::EOpMix:
+ if (! builder.isBoolType(builder.getScalarTypeId(builder.getTypeId(operands.back())))) {
+ assert(isFloat);
+ libCall = spv::GLSLstd450FMix;
+ } else {
+ opCode = spv::OpSelect;
+ std::swap(operands.front(), operands.back());
+ }
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpStep:
+ libCall = spv::GLSLstd450Step;
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpSmoothStep:
+ libCall = spv::GLSLstd450SmoothStep;
+ builder.promoteScalar(precision, operands[0], operands[2]);
+ builder.promoteScalar(precision, operands[1], operands[2]);
+ break;
+
+ case glslang::EOpDistance:
+ libCall = spv::GLSLstd450Distance;
+ break;
+ case glslang::EOpCross:
+ libCall = spv::GLSLstd450Cross;
+ break;
+ case glslang::EOpFaceForward:
+ libCall = spv::GLSLstd450FaceForward;
+ break;
+ case glslang::EOpReflect:
+ libCall = spv::GLSLstd450Reflect;
+ break;
+ case glslang::EOpRefract:
+ libCall = spv::GLSLstd450Refract;
+ break;
+ case glslang::EOpInterpolateAtSample:
+#ifdef AMD_EXTENSIONS
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+#endif
+ libCall = spv::GLSLstd450InterpolateAtSample;
+ break;
+ case glslang::EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+#endif
+ libCall = spv::GLSLstd450InterpolateAtOffset;
+ break;
+ case glslang::EOpAddCarry:
+ opCode = spv::OpIAddCarry;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpSubBorrow:
+ opCode = spv::OpISubBorrow;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpUMulExtended:
+ opCode = spv::OpUMulExtended;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpIMulExtended:
+ opCode = spv::OpSMulExtended;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpBitfieldExtract:
+ if (isUnsigned)
+ opCode = spv::OpBitFieldUExtract;
+ else
+ opCode = spv::OpBitFieldSExtract;
+ break;
+ case glslang::EOpBitfieldInsert:
+ opCode = spv::OpBitFieldInsert;
+ break;
+
+ case glslang::EOpFma:
+ libCall = spv::GLSLstd450Fma;
+ break;
+ case glslang::EOpFrexp:
+ {
+ libCall = spv::GLSLstd450FrexpStruct;
+ assert(builder.isPointerType(typeId1));
+ typeId1 = builder.getContainedTypeId(typeId1);
+ int width = builder.getScalarTypeWidth(typeId1);
+#ifdef AMD_EXTENSIONS
+ if (width == 16)
+            // A 16-bit exp operand requires enabling the SPV_AMD_gpu_shader_int16 extension
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_int16);
+#endif
+ if (builder.getNumComponents(operands[0]) == 1)
+ frexpIntType = builder.makeIntegerType(width, true);
+ else
+ frexpIntType = builder.makeVectorType(builder.makeIntegerType(width, true), builder.getNumComponents(operands[0]));
+ typeId = builder.makeStructResultType(typeId0, frexpIntType);
+ consumedOperands = 1;
+ }
+ break;
+ case glslang::EOpLdexp:
+ libCall = spv::GLSLstd450Ldexp;
+ break;
+
+ case glslang::EOpReadInvocation:
+ return createInvocationsOperation(op, typeId, operands, typeProxy);
+
+ case glslang::EOpSubgroupBroadcast:
+ case glslang::EOpSubgroupBallotBitExtract:
+ case glslang::EOpSubgroupShuffle:
+ case glslang::EOpSubgroupShuffleXor:
+ case glslang::EOpSubgroupShuffleUp:
+ case glslang::EOpSubgroupShuffleDown:
+ case glslang::EOpSubgroupClusteredAdd:
+ case glslang::EOpSubgroupClusteredMul:
+ case glslang::EOpSubgroupClusteredMin:
+ case glslang::EOpSubgroupClusteredMax:
+ case glslang::EOpSubgroupClusteredAnd:
+ case glslang::EOpSubgroupClusteredOr:
+ case glslang::EOpSubgroupClusteredXor:
+ case glslang::EOpSubgroupQuadBroadcast:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedXor:
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+#endif
+ return createSubgroupOperation(op, typeId, operands, typeProxy);
+
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSwizzleInvocations:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::SwizzleInvocationsAMD;
+ break;
+ case glslang::EOpSwizzleInvocationsMasked:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::SwizzleInvocationsMaskedAMD;
+ break;
+ case glslang::EOpWriteInvocation:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::WriteInvocationAMD;
+ break;
+
+ case glslang::EOpMin3:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+ if (isFloat)
+ libCall = spv::FMin3AMD;
+ else {
+ if (isUnsigned)
+ libCall = spv::UMin3AMD;
+ else
+ libCall = spv::SMin3AMD;
+ }
+ break;
+ case glslang::EOpMax3:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+ if (isFloat)
+ libCall = spv::FMax3AMD;
+ else {
+ if (isUnsigned)
+ libCall = spv::UMax3AMD;
+ else
+ libCall = spv::SMax3AMD;
+ }
+ break;
+ case glslang::EOpMid3:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+ if (isFloat)
+ libCall = spv::FMid3AMD;
+ else {
+ if (isUnsigned)
+ libCall = spv::UMid3AMD;
+ else
+ libCall = spv::SMid3AMD;
+ }
+ break;
+
+ case glslang::EOpInterpolateAtVertex:
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ libCall = spv::InterpolateAtVertexAMD;
+ break;
+#endif
+ case glslang::EOpBarrier:
+ {
+ // This is for the extended controlBarrier function, with four operands.
+ // The unextended barrier() goes through createNoArgOperation.
+ assert(operands.size() == 4);
+ unsigned int executionScope = builder.getConstantScalar(operands[0]);
+ unsigned int memoryScope = builder.getConstantScalar(operands[1]);
+ unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]);
+ builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
+ if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice || memoryScope == spv::ScopeDevice)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+ return 0;
+ }
+ break;
+ case glslang::EOpMemoryBarrier:
+ {
+ // This is for the extended memoryBarrier function, with three operands.
+ // The unextended memoryBarrier() goes through createNoArgOperation.
+ assert(operands.size() == 3);
+ unsigned int memoryScope = builder.getConstantScalar(operands[0]);
+ unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]);
+ builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
+ if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+ return 0;
+ }
+ break;
+
+#ifdef NV_EXTENSIONS
+ case glslang::EOpReportIntersectionNV:
+ {
+ typeId = builder.makeBoolType();
+ opCode = spv::OpReportIntersectionNV;
+ }
+ break;
+ case glslang::EOpTraceNV:
+ {
+ builder.createNoResultOp(spv::OpTraceNV, operands);
+ return 0;
+ }
+ break;
+ case glslang::EOpExecuteCallableNV:
+ {
+ builder.createNoResultOp(spv::OpExecuteCallableNV, operands);
+ return 0;
+ }
+ break;
+ case glslang::EOpWritePackedPrimitiveIndices4x8NV:
+ builder.createNoResultOp(spv::OpWritePackedPrimitiveIndices4x8NV, operands);
+ return 0;
+#endif
+ case glslang::EOpCooperativeMatrixMulAdd:
+ opCode = spv::OpCooperativeMatrixMulAddNV;
+ break;
+
+ default:
+ return 0;
+ }
+
+ spv::Id id = 0;
+ if (libCall >= 0) {
+ // Use an extended instruction from the standard library.
+ // Construct the call arguments, without modifying the original operands vector.
+ // We might need the remaining arguments, e.g. in the EOpFrexp case.
+ std::vector<spv::Id> callArguments(operands.begin(), operands.begin() + consumedOperands);
+ id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, callArguments);
+ } else if (opCode == spv::OpDot && !isFloat) {
+ // int dot(int, int)
+        // NOTE: never called for scalars or single-component vectors; those are turned into a simple multiply before reaching here
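+        // Expand dot(a, b) as a.x*b.x + a.y*b.y + ...: one component-wise
+        // OpIMul followed by a chain of OpIAdd over the product's components.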
+ const int componentCount = builder.getNumComponents(operands[0]);
+ spv::Id mulOp = builder.createBinOp(spv::OpIMul, builder.getTypeId(operands[0]), operands[0], operands[1]);
+ builder.setPrecision(mulOp, precision);
+ id = builder.createCompositeExtract(mulOp, typeId, 0);
+ for (int i = 1; i < componentCount; ++i) {
+ builder.setPrecision(id, precision);
+            id = builder.createBinOp(spv::OpIAdd, typeId, id, builder.createCompositeExtract(mulOp, typeId, i));
+ }
+ } else {
+ switch (consumedOperands) {
+ case 0:
+ // should all be handled by visitAggregate and createNoArgOperation
+ assert(0);
+ return 0;
+ case 1:
+ // should all be handled by createUnaryOperation
+ assert(0);
+ return 0;
+ case 2:
+ id = builder.createBinOp(opCode, typeId, operands[0], operands[1]);
+ break;
+ default:
+ // anything 3 or over doesn't have l-value operands, so all should be consumed
+ assert(consumedOperands == operands.size());
+ id = builder.createOp(opCode, typeId, operands);
+ break;
+ }
+ }
+
+ // Decode the return types that were structures
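+    // e.g. uaddCarry(a, b, carry) was built above as OpIAddCarry yielding a
+    // {sum, carry} struct; the carry member is stored through the out-parameter
+    // and the sum member becomes the expression's result.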
+ switch (op) {
+ case glslang::EOpAddCarry:
+ case glslang::EOpSubBorrow:
+ builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+ id = builder.createCompositeExtract(id, typeId0, 0);
+ break;
+ case glslang::EOpUMulExtended:
+ case glslang::EOpIMulExtended:
+ builder.createStore(builder.createCompositeExtract(id, typeId0, 0), operands[3]);
+ builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+ break;
+ case glslang::EOpFrexp:
+ {
+ assert(operands.size() == 2);
+ if (builder.isFloatType(builder.getScalarTypeId(typeId1))) {
+            // "exp" is a floating-point type (from an HLSL intrinsic)
+ spv::Id member1 = builder.createCompositeExtract(id, frexpIntType, 1);
+ member1 = builder.createUnaryOp(spv::OpConvertSToF, typeId1, member1);
+ builder.createStore(member1, operands[1]);
+ } else
+            // "exp" is an integer type (from a GLSL built-in function)
+ builder.createStore(builder.createCompositeExtract(id, frexpIntType, 1), operands[1]);
+ id = builder.createCompositeExtract(id, typeId0, 0);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return builder.setPrecision(id, precision);
+}
+
+// Intrinsics with no arguments (or with no return value and no precision).
+spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
+{
+    // GLSL memory barriers use queue-family scope in the new model, device scope in the old model
+ spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
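+    // (Under the Vulkan memory model, Device scope additionally requires the
+    // VulkanMemoryModelDeviceScopeKHR capability, so QueueFamilyKHR is the
+    // portable default there.)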
+
+ switch (op) {
+ case glslang::EOpEmitVertex:
+ builder.createNoResultOp(spv::OpEmitVertex);
+ return 0;
+ case glslang::EOpEndPrimitive:
+ builder.createNoResultOp(spv::OpEndPrimitive);
+ return 0;
+ case glslang::EOpBarrier:
+ if (glslangIntermediate->getStage() == EShLangTessControl) {
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+ spv::MemorySemanticsOutputMemoryKHRMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ } else {
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
+ }
+ } else {
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+ spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ }
+ return 0;
+ case glslang::EOpMemoryBarrier:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierAtomicCounter:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierBuffer:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierImage:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierShared:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpGroupMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpAllMemoryBarrierWithGroupSync:
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice,
+ spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpDeviceMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpDeviceMemoryBarrierWithGroupSync:
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpWorkgroupMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+ spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpSubgroupBarrier:
+ builder.createControlBarrier(spv::ScopeSubgroup, spv::ScopeSubgroup, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrierBuffer:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrierImage:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrierShared:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupElect: {
+ std::vector<spv::Id> operands;
+ return createSubgroupOperation(op, typeId, operands, glslang::EbtVoid);
+ }
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpTime:
+ {
+ std::vector<spv::Id> args; // Dummy arguments
+ spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args);
+ return builder.setPrecision(id, precision);
+ }
+#endif
+#ifdef NV_EXTENSIONS
+ case glslang::EOpIgnoreIntersectionNV:
+ builder.createNoResultOp(spv::OpIgnoreIntersectionNV);
+ return 0;
+ case glslang::EOpTerminateRayNV:
+ builder.createNoResultOp(spv::OpTerminateRayNV);
+ return 0;
+#endif
+ default:
+ logger->missingFunctionality("unknown operation with no arguments");
+ return 0;
+ }
+}
+
+spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol)
+{
+ auto iter = symbolValues.find(symbol->getId());
+ spv::Id id;
+ if (symbolValues.end() != iter) {
+ id = iter->second;
+ return id;
+ }
+
+ // it was not found, create it
+ id = createSpvVariable(symbol);
+ symbolValues[symbol->getId()] = id;
+
+ if (symbol->getBasicType() != glslang::EbtBlock) {
+ builder.addDecoration(id, TranslatePrecisionDecoration(symbol->getType()));
+ builder.addDecoration(id, TranslateInterpolationDecoration(symbol->getType().getQualifier()));
+ builder.addDecoration(id, TranslateAuxiliaryStorageDecoration(symbol->getType().getQualifier()));
+#ifdef NV_EXTENSIONS
+ addMeshNVDecoration(id, /*member*/ -1, symbol->getType().getQualifier());
+#endif
+ if (symbol->getType().getQualifier().hasSpecConstantId())
+ builder.addDecoration(id, spv::DecorationSpecId, symbol->getType().getQualifier().layoutSpecConstantId);
+ if (symbol->getQualifier().hasIndex())
+ builder.addDecoration(id, spv::DecorationIndex, symbol->getQualifier().layoutIndex);
+ if (symbol->getQualifier().hasComponent())
+ builder.addDecoration(id, spv::DecorationComponent, symbol->getQualifier().layoutComponent);
+ // atomic counters use this:
+ if (symbol->getQualifier().hasOffset())
+ builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutOffset);
+ }
+
+ if (symbol->getQualifier().hasLocation())
+ builder.addDecoration(id, spv::DecorationLocation, symbol->getQualifier().layoutLocation);
+ builder.addDecoration(id, TranslateInvariantDecoration(symbol->getType().getQualifier()));
+ if (symbol->getQualifier().hasStream() && glslangIntermediate->isMultiStream()) {
+ builder.addCapability(spv::CapabilityGeometryStreams);
+ builder.addDecoration(id, spv::DecorationStream, symbol->getQualifier().layoutStream);
+ }
+ if (symbol->getQualifier().hasSet())
+ builder.addDecoration(id, spv::DecorationDescriptorSet, symbol->getQualifier().layoutSet);
+ else if (IsDescriptorResource(symbol->getType())) {
+ // default to 0
+ builder.addDecoration(id, spv::DecorationDescriptorSet, 0);
+ }
+ if (symbol->getQualifier().hasBinding())
+ builder.addDecoration(id, spv::DecorationBinding, symbol->getQualifier().layoutBinding);
+ else if (IsDescriptorResource(symbol->getType())) {
+ // default to 0
+ builder.addDecoration(id, spv::DecorationBinding, 0);
+ }
+ if (symbol->getQualifier().hasAttachment())
+ builder.addDecoration(id, spv::DecorationInputAttachmentIndex, symbol->getQualifier().layoutAttachment);
+ if (glslangIntermediate->getXfbMode()) {
+ builder.addCapability(spv::CapabilityTransformFeedback);
+ if (symbol->getQualifier().hasXfbBuffer()) {
+ builder.addDecoration(id, spv::DecorationXfbBuffer, symbol->getQualifier().layoutXfbBuffer);
+ unsigned stride = glslangIntermediate->getXfbStride(symbol->getQualifier().layoutXfbBuffer);
+ if (stride != glslang::TQualifier::layoutXfbStrideEnd)
+ builder.addDecoration(id, spv::DecorationXfbStride, stride);
+ }
+ if (symbol->getQualifier().hasXfbOffset())
+ builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutXfbOffset);
+ }
+
+ if (symbol->getType().isImage()) {
+ std::vector<spv::Decoration> memory;
+ TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, glslangIntermediate->usingVulkanMemoryModel());
+ for (unsigned int i = 0; i < memory.size(); ++i)
+ builder.addDecoration(id, memory[i]);
+ }
+
+ // built-in variable decorations
+ spv::BuiltIn builtIn = TranslateBuiltInDecoration(symbol->getQualifier().builtIn, false);
+ if (builtIn != spv::BuiltInMax)
+ builder.addDecoration(id, spv::DecorationBuiltIn, (int)builtIn);
+
+ // nonuniform
+ builder.addDecoration(id, TranslateNonUniformDecoration(symbol->getType().getQualifier()));
+
+#ifdef NV_EXTENSIONS
+ if (builtIn == spv::BuiltInSampleMask) {
+ spv::Decoration decoration;
+ // GL_NV_sample_mask_override_coverage extension
+ if (glslangIntermediate->getLayoutOverrideCoverage())
+ decoration = (spv::Decoration)spv::DecorationOverrideCoverageNV;
+ else
+ decoration = (spv::Decoration)spv::DecorationMax;
+ builder.addDecoration(id, decoration);
+ if (decoration != spv::DecorationMax) {
+ builder.addExtension(spv::E_SPV_NV_sample_mask_override_coverage);
+ }
+ }
+ else if (builtIn == spv::BuiltInLayer) {
+ // SPV_NV_viewport_array2 extension
+ if (symbol->getQualifier().layoutViewportRelative) {
+ builder.addDecoration(id, (spv::Decoration)spv::DecorationViewportRelativeNV);
+ builder.addCapability(spv::CapabilityShaderViewportMaskNV);
+ builder.addExtension(spv::E_SPV_NV_viewport_array2);
+ }
+ if (symbol->getQualifier().layoutSecondaryViewportRelativeOffset != -2048) {
+ builder.addDecoration(id, (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV,
+ symbol->getQualifier().layoutSecondaryViewportRelativeOffset);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ }
+ }
+
+ if (symbol->getQualifier().layoutPassthrough) {
+ builder.addDecoration(id, spv::DecorationPassthroughNV);
+ builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV);
+ builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough);
+ }
+ if (symbol->getQualifier().pervertexNV) {
+ builder.addDecoration(id, spv::DecorationPerVertexNV);
+ builder.addCapability(spv::CapabilityFragmentBarycentricNV);
+ builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric);
+ }
+#endif
+
+ if (glslangIntermediate->getHlslFunctionality1() && symbol->getType().getQualifier().semanticName != nullptr) {
+ builder.addExtension("SPV_GOOGLE_hlsl_functionality1");
+ builder.addDecoration(id, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE,
+ symbol->getType().getQualifier().semanticName);
+ }
+
+ if (symbol->getBasicType() == glslang::EbtReference) {
+ builder.addDecoration(id, symbol->getType().getQualifier().restrict ? spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT);
+ }
+
+ return id;
+}
+
+#ifdef NV_EXTENSIONS
+// add per-primitive, per-view, per-task decorations to a struct member (member >= 0) or an object
+void TGlslangToSpvTraverser::addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier& qualifier)
+{
+ if (member >= 0) {
+ if (qualifier.perPrimitiveNV) {
+ // Need to add capability/extension for fragment shader.
+ // Mesh shader already adds this by default.
+ if (glslangIntermediate->getStage() == EShLangFragment) {
+ builder.addCapability(spv::CapabilityMeshShadingNV);
+ builder.addExtension(spv::E_SPV_NV_mesh_shader);
+ }
+ builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerPrimitiveNV);
+ }
+ if (qualifier.perViewNV)
+ builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerViewNV);
+ if (qualifier.perTaskNV)
+ builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerTaskNV);
+ } else {
+ if (qualifier.perPrimitiveNV) {
+ // Need to add capability/extension for fragment shader.
+ // Mesh shader already adds this by default.
+ if (glslangIntermediate->getStage() == EShLangFragment) {
+ builder.addCapability(spv::CapabilityMeshShadingNV);
+ builder.addExtension(spv::E_SPV_NV_mesh_shader);
+ }
+ builder.addDecoration(id, spv::DecorationPerPrimitiveNV);
+ }
+ if (qualifier.perViewNV)
+ builder.addDecoration(id, spv::DecorationPerViewNV);
+ if (qualifier.perTaskNV)
+ builder.addDecoration(id, spv::DecorationPerTaskNV);
+ }
+}
+#endif
+
+// Make a full tree of instructions to build a SPIR-V specialization constant,
+// or regular constant if possible.
+//
+// TBD: this is not yet done, nor verified to be the best design; it does handle the leaf symbols, though
+//
+// Recursively walk the nodes. The nodes form a tree whose leaves are
+// regular constants, which themselves are trees that createSpvConstant()
+// recursively walks. So, this function walks the "top" of the tree:
+// - emit specialization constant-building instructions for specConstant
+// - when running into a non-spec-constant, switch to createSpvConstant()
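+//
+// For example (a sketch), given
+//   layout(constant_id = 0) const int N = 2;
+//   const ivec2 v = ivec2(N, 3);
+// the tree for 'v' builds a spec-constant composite whose leaves are the
+// spec constant N and the front-end constant 3.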
+spv::Id TGlslangToSpvTraverser::createSpvConstant(const glslang::TIntermTyped& node)
+{
+ assert(node.getQualifier().isConstant());
+
+ // Handle front-end constants first (non-specialization constants).
+ if (! node.getQualifier().specConstant) {
+ // hand off to the non-spec-constant path
+ assert(node.getAsConstantUnion() != nullptr || node.getAsSymbolNode() != nullptr);
+ int nextConst = 0;
+ return createSpvConstantFromConstUnionArray(node.getType(), node.getAsConstantUnion() ? node.getAsConstantUnion()->getConstArray() : node.getAsSymbolNode()->getConstArray(),
+ nextConst, false);
+ }
+
+ // We now know we have a specialization constant to build
+
+    // gl_WorkGroupSize is a special case until the front-end handles hierarchical specialization constants;
+    // even then, its specialization ids are handled by special-case syntax in GLSL: layout(local_size_x = ...
+ if (node.getType().getQualifier().builtIn == glslang::EbvWorkGroupSize) {
+ std::vector<spv::Id> dimConstId;
+ for (int dim = 0; dim < 3; ++dim) {
+ bool specConst = (glslangIntermediate->getLocalSizeSpecId(dim) != glslang::TQualifier::layoutNotSet);
+ dimConstId.push_back(builder.makeUintConstant(glslangIntermediate->getLocalSize(dim), specConst));
+ if (specConst) {
+ builder.addDecoration(dimConstId.back(), spv::DecorationSpecId,
+ glslangIntermediate->getLocalSizeSpecId(dim));
+ }
+ }
+ return builder.makeCompositeConstant(builder.makeVectorType(builder.makeUintType(32), 3), dimConstId, true);
+ }
+
+ // An AST node labelled as specialization constant should be a symbol node.
+ // Its initializer should either be a sub tree with constant nodes, or a constant union array.
+ if (auto* sn = node.getAsSymbolNode()) {
+ spv::Id result;
+ if (auto* sub_tree = sn->getConstSubtree()) {
+ // Traverse the constant constructor sub tree like generating normal run-time instructions.
+ // During the AST traversal, if the node is marked as 'specConstant', SpecConstantOpModeGuard
+ // will set the builder into spec constant op instruction generating mode.
+ sub_tree->traverse(this);
+ result = accessChainLoad(sub_tree->getType());
+ } else if (auto* const_union_array = &sn->getConstArray()) {
+ int nextConst = 0;
+ result = createSpvConstantFromConstUnionArray(sn->getType(), *const_union_array, nextConst, true);
+ } else {
+ logger->missingFunctionality("Invalid initializer for spec onstant.");
+ return spv::NoResult;
+ }
+ builder.addName(result, sn->getName().c_str());
+ return result;
+ }
+
+ // Neither a front-end constant node, nor a specialization constant node with constant union array or
+ // constant sub tree as initializer.
+ logger->missingFunctionality("Neither a front-end constant nor a spec constant.");
+ return spv::NoResult;
+}
+
+// Use 'consts' as the flattened glslang source of scalar constants to recursively
+// build the aggregate SPIR-V constant.
+//
+// If there are not enough elements present in 'consts', 0 will be substituted;
+// an empty 'consts' can be used to create a fully zeroed SPIR-V constant.
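+//
+// For example, building a vec2[2] constant from consts = { 1.0, 2.0, 3.0 }
+// yields { vec2(1.0, 2.0), vec2(3.0, 0.0) }: the missing fourth scalar is zeroed.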
+//
+spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glslang::TType& glslangType, const glslang::TConstUnionArray& consts, int& nextConst, bool specConstant)
+{
+ // vector of constants for SPIR-V
+ std::vector<spv::Id> spvConsts;
+
+ // Type is used for struct and array constants
+ spv::Id typeId = convertGlslangToSpvType(glslangType);
+
+ if (glslangType.isArray()) {
+ glslang::TType elementType(glslangType, 0);
+ for (int i = 0; i < glslangType.getOuterArraySize(); ++i)
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(elementType, consts, nextConst, false));
+ } else if (glslangType.isMatrix()) {
+ glslang::TType vectorType(glslangType, 0);
+ for (int col = 0; col < glslangType.getMatrixCols(); ++col)
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(vectorType, consts, nextConst, false));
+ } else if (glslangType.isCoopMat()) {
+ glslang::TType componentType(glslangType.getBasicType());
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(componentType, consts, nextConst, false));
+ } else if (glslangType.isStruct()) {
+ glslang::TVector<glslang::TTypeLoc>::const_iterator iter;
+ for (iter = glslangType.getStruct()->begin(); iter != glslangType.getStruct()->end(); ++iter)
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(*iter->type, consts, nextConst, false));
+ } else if (glslangType.getVectorSize() > 1) {
+ for (unsigned int i = 0; i < (unsigned int)glslangType.getVectorSize(); ++i) {
+ bool zero = nextConst >= consts.size();
+ switch (glslangType.getBasicType()) {
+ case glslang::EbtInt8:
+ spvConsts.push_back(builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const()));
+ break;
+ case glslang::EbtUint8:
+ spvConsts.push_back(builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const()));
+ break;
+ case glslang::EbtInt16:
+ spvConsts.push_back(builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const()));
+ break;
+ case glslang::EbtUint16:
+ spvConsts.push_back(builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const()));
+ break;
+ case glslang::EbtInt:
+ spvConsts.push_back(builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst()));
+ break;
+ case glslang::EbtUint:
+ spvConsts.push_back(builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst()));
+ break;
+ case glslang::EbtInt64:
+ spvConsts.push_back(builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const()));
+ break;
+ case glslang::EbtUint64:
+ spvConsts.push_back(builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const()));
+ break;
+ case glslang::EbtFloat:
+ spvConsts.push_back(builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+ break;
+ case glslang::EbtDouble:
+ spvConsts.push_back(builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst()));
+ break;
+ case glslang::EbtFloat16:
+ spvConsts.push_back(builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+ break;
+ case glslang::EbtBool:
+ spvConsts.push_back(builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst()));
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ ++nextConst;
+ }
+ } else {
+ // we have a non-aggregate (scalar) constant
+ bool zero = nextConst >= consts.size();
+ spv::Id scalar = 0;
+ switch (glslangType.getBasicType()) {
+ case glslang::EbtInt8:
+ scalar = builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const(), specConstant);
+ break;
+ case glslang::EbtUint8:
+ scalar = builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const(), specConstant);
+ break;
+ case glslang::EbtInt16:
+ scalar = builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const(), specConstant);
+ break;
+ case glslang::EbtUint16:
+ scalar = builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const(), specConstant);
+ break;
+ case glslang::EbtInt:
+ scalar = builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst(), specConstant);
+ break;
+ case glslang::EbtUint:
+ scalar = builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst(), specConstant);
+ break;
+ case glslang::EbtInt64:
+ scalar = builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const(), specConstant);
+ break;
+ case glslang::EbtUint64:
+ scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant);
+ break;
+ case glslang::EbtFloat:
+ scalar = builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant);
+ break;
+ case glslang::EbtDouble:
+ scalar = builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst(), specConstant);
+ break;
+ case glslang::EbtFloat16:
+ scalar = builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant);
+ break;
+ case glslang::EbtBool:
+ scalar = builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst(), specConstant);
+ break;
+ case glslang::EbtReference:
+ scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant);
+ scalar = builder.createUnaryOp(spv::OpBitcast, typeId, scalar);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ ++nextConst;
+ return scalar;
+ }
+
+ return builder.makeCompositeConstant(typeId, spvConsts);
+}
+
+// Return true if the node is a constant or symbol whose reading has no
+// non-trivial observable cost or effect.
+bool TGlslangToSpvTraverser::isTrivialLeaf(const glslang::TIntermTyped* node)
+{
+ // don't know what this is
+ if (node == nullptr)
+ return false;
+
+ // a constant is safe
+ if (node->getAsConstantUnion() != nullptr)
+ return true;
+
+ // not a symbol means non-trivial
+ if (node->getAsSymbolNode() == nullptr)
+ return false;
+
+ // a symbol, depends on what's being read
+ switch (node->getType().getQualifier().storage) {
+ case glslang::EvqTemporary:
+ case glslang::EvqGlobal:
+ case glslang::EvqIn:
+ case glslang::EvqInOut:
+ case glslang::EvqConst:
+ case glslang::EvqConstReadOnly:
+ case glslang::EvqUniform:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// A node is trivial if it is a single operation with no side effects.
+// Anything coming from HLSL, or and/or on vectors, is always trivial, as these do not short circuit.
+// Otherwise, err on the side of saying non-trivial.
+// Return true if trivial.
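+// For example, "a < 3" (comparing two trivial leaves) is trivial, while
+// "f() < 3" is not, since evaluating f() may have side effects.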
+bool TGlslangToSpvTraverser::isTrivial(const glslang::TIntermTyped* node)
+{
+ if (node == nullptr)
+ return false;
+
+    // count non-scalars as trivial, as well as anything coming from HLSL
+ if (! node->getType().isScalarOrVec1() || glslangIntermediate->getSource() == glslang::EShSourceHlsl)
+ return true;
+
+ // symbols and constants are trivial
+ if (isTrivialLeaf(node))
+ return true;
+
+    // otherwise, it needs to be a simple operation on one or two leaf nodes
+
+ // not a simple operation
+ const glslang::TIntermBinary* binaryNode = node->getAsBinaryNode();
+ const glslang::TIntermUnary* unaryNode = node->getAsUnaryNode();
+ if (binaryNode == nullptr && unaryNode == nullptr)
+ return false;
+
+ // not on leaf nodes
+ if (binaryNode && (! isTrivialLeaf(binaryNode->getLeft()) || ! isTrivialLeaf(binaryNode->getRight())))
+ return false;
+
+ if (unaryNode && ! isTrivialLeaf(unaryNode->getOperand())) {
+ return false;
+ }
+
+ switch (node->getAsOperator()->getOp()) {
+ case glslang::EOpLogicalNot:
+ case glslang::EOpConvIntToBool:
+ case glslang::EOpConvUintToBool:
+ case glslang::EOpConvFloatToBool:
+ case glslang::EOpConvDoubleToBool:
+ case glslang::EOpEqual:
+ case glslang::EOpNotEqual:
+ case glslang::EOpLessThan:
+ case glslang::EOpGreaterThan:
+ case glslang::EOpLessThanEqual:
+ case glslang::EOpGreaterThanEqual:
+ case glslang::EOpIndexDirect:
+ case glslang::EOpIndexDirectStruct:
+ case glslang::EOpLogicalXor:
+ case glslang::EOpAny:
+ case glslang::EOpAll:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Emit short-circuiting code, where 'right' is never evaluated unless
+// the left side is true (for &&) or false (for ||).
+spv::Id TGlslangToSpvTraverser::createShortCircuit(glslang::TOperator op, glslang::TIntermTyped& left, glslang::TIntermTyped& right)
+{
+ spv::Id boolTypeId = builder.makeBoolType();
+
+ // emit left operand
+ builder.clearAccessChain();
+ left.traverse(this);
+ spv::Id leftId = accessChainLoad(left.getType());
+
+ // Operands to accumulate OpPhi operands
+ std::vector<spv::Id> phiOperands;
+ // accumulate left operand's phi information
+ phiOperands.push_back(leftId);
+ phiOperands.push_back(builder.getBuildPoint()->getId());
+
+ // Make the two kinds of operation symmetric with a "!"
+ // || => emit "if (! left) result = right"
+ // && => emit "if ( left) result = right"
+ //
+ // TODO: this runtime "not" for || could be avoided by adding functionality
+    // to 'builder' to have an "else" without a "then"
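+    //
+    // e.g. "a || b" is emitted roughly as
+    //   result = a;
+    //   if (!a)
+    //       result = b;
+    // with 'result' realized as an OpPhi over the two incoming blocks.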
+ if (op == glslang::EOpLogicalOr)
+ leftId = builder.createUnaryOp(spv::OpLogicalNot, boolTypeId, leftId);
+
+ // make an "if" based on the left value
+ spv::Builder::If ifBuilder(leftId, spv::SelectionControlMaskNone, builder);
+
+ // emit right operand as the "then" part of the "if"
+ builder.clearAccessChain();
+ right.traverse(this);
+ spv::Id rightId = accessChainLoad(right.getType());
+
+    // accumulate right operand's phi information
+ phiOperands.push_back(rightId);
+ phiOperands.push_back(builder.getBuildPoint()->getId());
+
+ // finish the "if"
+ ifBuilder.makeEndIf();
+
+ // phi together the two results
+ return builder.createOp(spv::OpPhi, boolTypeId, phiOperands);
+}
+
+#ifdef AMD_EXTENSIONS
+// Return the Id of the imported set of extended instructions corresponding to the given name.
+// Import this set if it has not been imported yet.
+spv::Id TGlslangToSpvTraverser::getExtBuiltins(const char* name)
+{
+ if (extBuiltinMap.find(name) != extBuiltinMap.end())
+ return extBuiltinMap[name];
+ else {
+ builder.addExtension(name);
+ spv::Id extBuiltins = builder.import(name);
+ extBuiltinMap[name] = extBuiltins;
+ return extBuiltins;
+ }
+}
+#endif
+
+}; // end anonymous namespace
+
+namespace glslang {
+
+void GetSpirvVersion(std::string& version)
+{
+ const int bufSize = 100;
+ char buf[bufSize];
+ snprintf(buf, bufSize, "0x%08x, Revision %d", spv::Version, spv::Revision);
+ version = buf;
+}
+
+// For the low-order part of the generator's magic number. Bump up
+// when there is a change in the style (e.g., if SSA form changes,
+// or a different instruction sequence to do something gets used).
+int GetSpirvGeneratorVersion()
+{
+ // return 1; // start
+ // return 2; // EOpAtomicCounterDecrement gets a post decrement, to map between GLSL -> SPIR-V
+ // return 3; // change/correct barrier-instruction operands, to match memory model group decisions
+ // return 4; // some deeper access chains: for dynamic vector component, and local Boolean component
+ // return 5; // make OpArrayLength result type be an int with signedness of 0
+ // return 6; // revert version 5 change, which makes a different (new) kind of incorrect code,
+ // versions 4 and 6 each generate OpArrayLength as it has long been done
+ return 7; // GLSL volatile keyword maps to both SPIR-V decorations Volatile and Coherent
+}
+
+// Write SPIR-V out to a binary file
+void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName)
+{
+ std::ofstream out;
+ out.open(baseName, std::ios::binary | std::ios::out);
+ if (out.fail())
+ printf("ERROR: Failed to open file: %s\n", baseName);
+ for (int i = 0; i < (int)spirv.size(); ++i) {
+ unsigned int word = spirv[i];
+ out.write((const char*)&word, 4);
+ }
+ out.close();
+}
+
+// Write SPIR-V out to a text file with 32-bit hexadecimal words
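+// When varName is non-null the output is an includable C array definition,
+// roughly of this shape ("myShader" is a placeholder; the first word is
+// always the SPIR-V magic number):
+//   const uint32_t myShader[] = {
+//       0x07230203,0x00010000, ...
+//   };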
+void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName)
+{
+ std::ofstream out;
+ out.open(baseName, std::ios::binary | std::ios::out);
+ if (out.fail())
+ printf("ERROR: Failed to open file: %s\n", baseName);
+ out << "\t// " <<
+ GetSpirvGeneratorVersion() << "." << GLSLANG_MINOR_VERSION << "." << GLSLANG_PATCH_LEVEL <<
+ std::endl;
+ if (varName != nullptr) {
+ out << "\t #pragma once" << std::endl;
+ out << "const uint32_t " << varName << "[] = {" << std::endl;
+ }
+ const int WORDS_PER_LINE = 8;
+ for (int i = 0; i < (int)spirv.size(); i += WORDS_PER_LINE) {
+ out << "\t";
+ for (int j = 0; j < WORDS_PER_LINE && i + j < (int)spirv.size(); ++j) {
+ const unsigned int word = spirv[i + j];
+ out << "0x" << std::hex << std::setw(8) << std::setfill('0') << word;
+ if (i + j + 1 < (int)spirv.size()) {
+ out << ",";
+ }
+ }
+ out << std::endl;
+ }
+ if (varName != nullptr) {
+ out << "};";
+ }
+ out.close();
+}
+
+//
+// Set up the glslang traversal
+//
+void GlslangToSpv(const TIntermediate& intermediate, std::vector<unsigned int>& spirv, SpvOptions* options)
+{
+ spv::SpvBuildLogger logger;
+ GlslangToSpv(intermediate, spirv, &logger, options);
+}
+
+void GlslangToSpv(const TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger* logger, SpvOptions* options)
+{
+ TIntermNode* root = intermediate.getTreeRoot();
+
+ if (root == 0)
+ return;
+
+ SpvOptions defaultOptions;
+ if (options == nullptr)
+ options = &defaultOptions;
+
+ GetThreadPoolAllocator().push();
+
+ TGlslangToSpvTraverser it(intermediate.getSpv().spv, &intermediate, logger, *options);
+ root->traverse(&it);
+ it.finishSpv();
+ it.dumpSpv(spirv);
+
+#if ENABLE_OPT
+ // If from HLSL, run spirv-opt to "legalize" the SPIR-V for Vulkan
+    // e.g. forward and remove memory writes of opaque types.
+ if ((intermediate.getSource() == EShSourceHlsl || options->optimizeSize) && !options->disableOptimizer)
+ SpirvToolsLegalize(intermediate, spirv, logger, options);
+
+ if (options->validate)
+ SpirvToolsValidate(intermediate, spirv, logger);
+
+ if (options->disassemble)
+ SpirvToolsDisassemble(std::cout, spirv);
+
+#endif
+
+ GetThreadPoolAllocator().pop();
+}
+
+}; // end namespace glslang
diff --git a/src/3rdparty/glslang/SPIRV/GlslangToSpv.h b/src/3rdparty/glslang/SPIRV/GlslangToSpv.h
new file mode 100644
index 0000000..86e1c23
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/GlslangToSpv.h
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2014 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#pragma once
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+ #pragma warning(disable : 4464) // relative include path contains '..'
+#endif
+
+#include "SpvTools.h"
+#include "../glslang/Include/intermediate.h"
+
+#include <string>
+#include <vector>
+
+#include "Logger.h"
+
+namespace glslang {
+
+void GetSpirvVersion(std::string&);
+int GetSpirvGeneratorVersion();
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ SpvOptions* options = nullptr);
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
+void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
+void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);
+
+}
diff --git a/src/3rdparty/glslang/SPIRV/InReadableOrder.cpp b/src/3rdparty/glslang/SPIRV/InReadableOrder.cpp
new file mode 100644
index 0000000..52b2961
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/InReadableOrder.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// The SPIR-V spec requires code blocks to appear in an order satisfying the
+// dominator-tree direction (i.e., dominator before the dominated). This is,
+// actually, easy to achieve: any pre-order CFG traversal algorithm will do it.
+// Because such algorithms visit a block only after traversing some path to it
+// from the root, they necessarily visit the block's idom first.
+//
+// But not every graph-traversal algorithm outputs blocks in an order that
+// appears logical to human readers. The problem is that unrelated branches may
+// be interspersed with each other, and merge blocks may come before some of the
+// branches being merged.
+//
+// A good, human-readable order of blocks may be achieved by performing
+// depth-first search but delaying merge nodes until after all their branches
+// have been visited. This is implemented below by the inReadableOrder()
+// function.
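+//
+// For example, for "if (c) A(); else B(); C();", a plain depth-first search may
+// emit the blocks as [entry, then(A), merge(C), else(B)]; delaying the merge
+// block until both branches are done yields the readable [entry, A, B, C].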
+
+#include "spvIR.h"
+
+#include <cassert>
+#include <unordered_set>
+
+using spv::Block;
+using spv::Id;
+
+namespace {
+// Traverses CFG in a readable order, invoking a pre-set callback on each block.
+// Use by calling visit() on the root block.
+class ReadableOrderTraverser {
+public:
+ explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
+ // Visits the block if it hasn't been visited already and isn't currently
+ // being delayed. Invokes callback(block), then descends into its
+ // successors. Delays merge-block and continue-block processing until all
+ // the branches have been completed.
+ void visit(Block* block)
+ {
+ assert(block);
+ if (visited_.count(block) || delayed_.count(block))
+ return;
+ callback_(block);
+ visited_.insert(block);
+ Block* mergeBlock = nullptr;
+ Block* continueBlock = nullptr;
+ auto mergeInst = block->getMergeInstruction();
+ if (mergeInst) {
+ Id mergeId = mergeInst->getIdOperand(0);
+ mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock();
+ delayed_.insert(mergeBlock);
+ if (mergeInst->getOpCode() == spv::OpLoopMerge) {
+ Id continueId = mergeInst->getIdOperand(1);
+ continueBlock =
+ block->getParent().getParent().getInstruction(continueId)->getBlock();
+ delayed_.insert(continueBlock);
+ }
+ }
+ const auto successors = block->getSuccessors();
+ for (auto it = successors.cbegin(); it != successors.cend(); ++it)
+ visit(*it);
+ if (continueBlock) {
+ delayed_.erase(continueBlock);
+ visit(continueBlock);
+ }
+ if (mergeBlock) {
+ delayed_.erase(mergeBlock);
+ visit(mergeBlock);
+ }
+ }
+
+private:
+ std::function<void(Block*)> callback_;
+ // Whether a block has already been visited or is being delayed.
+ std::unordered_set<Block *> visited_, delayed_;
+};
+}
+
+void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
+{
+ ReadableOrderTraverser(callback).visit(root);
+}
diff --git a/src/3rdparty/glslang/SPIRV/Logger.cpp b/src/3rdparty/glslang/SPIRV/Logger.cpp
new file mode 100644
index 0000000..48bd4e3
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/Logger.cpp
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include "Logger.h"
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+
+namespace spv {
+
+void SpvBuildLogger::tbdFunctionality(const std::string& f)
+{
+ if (std::find(std::begin(tbdFeatures), std::end(tbdFeatures), f) == std::end(tbdFeatures))
+ tbdFeatures.push_back(f);
+}
+
+void SpvBuildLogger::missingFunctionality(const std::string& f)
+{
+ if (std::find(std::begin(missingFeatures), std::end(missingFeatures), f) == std::end(missingFeatures))
+ missingFeatures.push_back(f);
+}
+
+std::string SpvBuildLogger::getAllMessages() const {
+ std::ostringstream messages;
+ for (auto it = tbdFeatures.cbegin(); it != tbdFeatures.cend(); ++it)
+ messages << "TBD functionality: " << *it << "\n";
+ for (auto it = missingFeatures.cbegin(); it != missingFeatures.cend(); ++it)
+ messages << "Missing functionality: " << *it << "\n";
+ for (auto it = warnings.cbegin(); it != warnings.cend(); ++it)
+ messages << "warning: " << *it << "\n";
+ for (auto it = errors.cbegin(); it != errors.cend(); ++it)
+ messages << "error: " << *it << "\n";
+ return messages.str();
+}
+
+} // end spv namespace
diff --git a/src/3rdparty/glslang/SPIRV/Logger.h b/src/3rdparty/glslang/SPIRV/Logger.h
new file mode 100644
index 0000000..2e4ddaf
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/Logger.h
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GLSLANG_SPIRV_LOGGER_H
+#define GLSLANG_SPIRV_LOGGER_H
+
+#include <string>
+#include <vector>
+
+namespace spv {
+
+// A class for holding all SPIR-V build status messages, including
+// missing/TBD functionalities, warnings, and errors.
+class SpvBuildLogger {
+public:
+ SpvBuildLogger() {}
+
+ // Registers a TBD functionality.
+ void tbdFunctionality(const std::string& f);
+ // Registers a missing functionality.
+ void missingFunctionality(const std::string& f);
+
+ // Logs a warning.
+ void warning(const std::string& w) { warnings.push_back(w); }
+ // Logs an error.
+ void error(const std::string& e) { errors.push_back(e); }
+
+ // Returns all messages accumulated in the order of:
+ // TBD functionalities, missing functionalities, warnings, errors.
+ std::string getAllMessages() const;
+
+private:
+ SpvBuildLogger(const SpvBuildLogger&);
+
+ std::vector<std::string> tbdFeatures;
+ std::vector<std::string> missingFeatures;
+ std::vector<std::string> warnings;
+ std::vector<std::string> errors;
+};
+
+} // end spv namespace
+
+#endif // GLSLANG_SPIRV_LOGGER_H
diff --git a/src/3rdparty/glslang/SPIRV/SPVRemapper.cpp b/src/3rdparty/glslang/SPIRV/SPVRemapper.cpp
new file mode 100644
index 0000000..fd0bb89
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SPVRemapper.cpp
@@ -0,0 +1,1487 @@
+//
+// Copyright (C) 2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "SPVRemapper.h"
+#include "doc.h"
+
+#if !defined (use_cpp11)
+// ... not supported before C++11
+#else // defined (use_cpp11)
+
+#include <algorithm>
+#include <cassert>
+#include "../glslang/Include/Common.h"
+
+namespace spv {
+
+ // By default, just abort on error. Can be overridden via RegisterErrorHandler
+ spirvbin_t::errorfn_t spirvbin_t::errorHandler = [](const std::string&) { exit(5); };
+ // By default, eat log messages. Can be overridden via RegisterLogHandler
+ spirvbin_t::logfn_t spirvbin_t::logHandler = [](const std::string&) { };
+
+ // This can be overridden to provide other message behavior if needed
+ void spirvbin_t::msg(int minVerbosity, int indent, const std::string& txt) const
+ {
+ if (verbose >= minVerbosity)
+ logHandler(std::string(indent, ' ') + txt);
+ }
+
+ // hash opcode, with special handling for OpExtInst
+ std::uint32_t spirvbin_t::asOpCodeHash(unsigned word)
+ {
+ const spv::Op opCode = asOpCode(word);
+
+ std::uint32_t offset = 0;
+
+ switch (opCode) {
+ case spv::OpExtInst:
+ offset += asId(word + 4); break;
+ default:
+ break;
+ }
+
+ return opCode * 19 + offset; // 19 = small prime
+ }
+
+ spirvbin_t::range_t spirvbin_t::literalRange(spv::Op opCode) const
+ {
+ static const int maxCount = 1<<30;
+
+ switch (opCode) {
+ case spv::OpTypeFloat: // fall through...
+ case spv::OpTypePointer: return range_t(2, 3);
+ case spv::OpTypeInt: return range_t(2, 4);
+ // TODO: case spv::OpTypeImage:
+ // TODO: case spv::OpTypeSampledImage:
+ case spv::OpTypeSampler: return range_t(3, 8);
+ case spv::OpTypeVector: // fall through
+ case spv::OpTypeMatrix: // ...
+ case spv::OpTypePipe: return range_t(3, 4);
+ case spv::OpConstant: return range_t(3, maxCount);
+ default: return range_t(0, 0);
+ }
+ }
+
+ spirvbin_t::range_t spirvbin_t::typeRange(spv::Op opCode) const
+ {
+ static const int maxCount = 1<<30;
+
+ if (isConstOp(opCode))
+ return range_t(1, 2);
+
+ switch (opCode) {
+ case spv::OpTypeVector: // fall through
+ case spv::OpTypeMatrix: // ...
+ case spv::OpTypeSampler: // ...
+ case spv::OpTypeArray: // ...
+ case spv::OpTypeRuntimeArray: // ...
+ case spv::OpTypePipe: return range_t(2, 3);
+ case spv::OpTypeStruct: // fall through
+ case spv::OpTypeFunction: return range_t(2, maxCount);
+ case spv::OpTypePointer: return range_t(3, 4);
+ default: return range_t(0, 0);
+ }
+ }
+
+ spirvbin_t::range_t spirvbin_t::constRange(spv::Op opCode) const
+ {
+ static const int maxCount = 1<<30;
+
+ switch (opCode) {
+ case spv::OpTypeArray: // fall through...
+ case spv::OpTypeRuntimeArray: return range_t(3, 4);
+ case spv::OpConstantComposite: return range_t(3, maxCount);
+ default: return range_t(0, 0);
+ }
+ }
+
+ // Return the size of a type in 32-bit words. This currently only
+    // handles ints and floats, and is only invoked for queries whose types
+    // must be integers. If ever needed, it can be generalized.
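+    // e.g. an OpTypeInt of width 64 occupies (64+31)/32 = 2 words.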
+ unsigned spirvbin_t::typeSizeInWords(spv::Id id) const
+ {
+ const unsigned typeStart = idPos(id);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ if (errorLatch)
+ return 0;
+
+ switch (opCode) {
+ case spv::OpTypeInt: // fall through...
+ case spv::OpTypeFloat: return (spv[typeStart+2]+31)/32;
+ default:
+ return 0;
+ }
+ }
+
+ // Looks up the type of a given const or variable ID, and
+ // returns its size in 32-bit words.
+ unsigned spirvbin_t::idTypeSizeInWords(spv::Id id) const
+ {
+ const auto tid_it = idTypeSizeMap.find(id);
+ if (tid_it == idTypeSizeMap.end()) {
+ error("type size for ID not found");
+ return 0;
+ }
+
+ return tid_it->second;
+ }
+
+ // Is this an opcode we should remove when using --strip?
+ bool spirvbin_t::isStripOp(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpSource:
+ case spv::OpSourceExtension:
+ case spv::OpName:
+ case spv::OpMemberName:
+ case spv::OpLine: return true;
+ default: return false;
+ }
+ }
+
+ // Return true if this opcode is flow control
+ bool spirvbin_t::isFlowCtrl(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpBranchConditional:
+ case spv::OpBranch:
+ case spv::OpSwitch:
+ case spv::OpLoopMerge:
+ case spv::OpSelectionMerge:
+ case spv::OpLabel:
+ case spv::OpFunction:
+ case spv::OpFunctionEnd: return true;
+ default: return false;
+ }
+ }
+
+ // Return true if this opcode defines a type
+ bool spirvbin_t::isTypeOp(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpTypeVoid:
+ case spv::OpTypeBool:
+ case spv::OpTypeInt:
+ case spv::OpTypeFloat:
+ case spv::OpTypeVector:
+ case spv::OpTypeMatrix:
+ case spv::OpTypeImage:
+ case spv::OpTypeSampler:
+ case spv::OpTypeArray:
+ case spv::OpTypeRuntimeArray:
+ case spv::OpTypeStruct:
+ case spv::OpTypeOpaque:
+ case spv::OpTypePointer:
+ case spv::OpTypeFunction:
+ case spv::OpTypeEvent:
+ case spv::OpTypeDeviceEvent:
+ case spv::OpTypeReserveId:
+ case spv::OpTypeQueue:
+ case spv::OpTypeSampledImage:
+ case spv::OpTypePipe: return true;
+ default: return false;
+ }
+ }
+
+ // Return true if this opcode defines a constant
+ bool spirvbin_t::isConstOp(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpConstantSampler:
+ error("unimplemented constant type");
+ return true;
+
+ case spv::OpConstantNull:
+ case spv::OpConstantTrue:
+ case spv::OpConstantFalse:
+ case spv::OpConstantComposite:
+ case spv::OpConstant:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ const auto inst_fn_nop = [](spv::Op, unsigned) { return false; };
+ const auto op_fn_nop = [](spv::Id&) { };
+
+ // g++ doesn't like these defined in the class proper in an anonymous namespace.
+ // Dunno why. Also MSVC doesn't like the constexpr keyword. Also dunno why.
+ // Defining them externally seems to please both compilers, so, here they are.
+ const spv::Id spirvbin_t::unmapped = spv::Id(-10000);
+ const spv::Id spirvbin_t::unused = spv::Id(-10001);
+ const int spirvbin_t::header_size = 5;
+
+ spv::Id spirvbin_t::nextUnusedId(spv::Id id)
+ {
+ while (isNewIdMapped(id)) // search for an unused ID
+ ++id;
+
+ return id;
+ }
+
+ spv::Id spirvbin_t::localId(spv::Id id, spv::Id newId)
+ {
+ //assert(id != spv::NoResult && newId != spv::NoResult);
+
+ if (id > bound()) {
+ error(std::string("ID out of range: ") + std::to_string(id));
+ return spirvbin_t::unused;
+ }
+
+ if (id >= idMapL.size())
+ idMapL.resize(id+1, unused);
+
+ if (newId != unmapped && newId != unused) {
+ if (isOldIdUnused(id)) {
+ error(std::string("ID unused in module: ") + std::to_string(id));
+ return spirvbin_t::unused;
+ }
+
+ if (!isOldIdUnmapped(id)) {
+ error(std::string("ID already mapped: ") + std::to_string(id) + " -> "
+ + std::to_string(localId(id)));
+
+ return spirvbin_t::unused;
+ }
+
+ if (isNewIdMapped(newId)) {
+ error(std::string("ID already used in module: ") + std::to_string(newId));
+ return spirvbin_t::unused;
+ }
+
+ msg(4, 4, std::string("map: ") + std::to_string(id) + " -> " + std::to_string(newId));
+ setMapped(newId);
+ largestNewId = std::max(largestNewId, newId);
+ }
+
+ return idMapL[id] = newId;
+ }
+
+    // Parse a literal string from the SPIR-V binary and return it as an std::string.
+    // Due to C++11 rvalue references / move semantics, this doesn't copy the result string.
+ std::string spirvbin_t::literalString(unsigned word) const
+ {
+ std::string literal;
+
+ literal.reserve(16);
+
+ const char* bytes = reinterpret_cast<const char*>(spv.data() + word);
+
+ while (bytes && *bytes)
+ literal += *bytes++;
+
+ return literal;
+ }
+
+ void spirvbin_t::applyMap()
+ {
+ msg(3, 2, std::string("Applying map: "));
+
+ // Map local IDs through the ID map
+ process(inst_fn_nop, // ignore instructions
+ [this](spv::Id& id) {
+ id = localId(id);
+
+ if (errorLatch)
+ return;
+
+ assert(id != unused && id != unmapped);
+ }
+ );
+ }
+
+ // Find free IDs for anything we haven't mapped
+ void spirvbin_t::mapRemainder()
+ {
+ msg(3, 2, std::string("Remapping remainder: "));
+
+ spv::Id unusedId = 1; // can't use 0: that's NoResult
+ spirword_t maxBound = 0;
+
+ for (spv::Id id = 0; id < idMapL.size(); ++id) {
+ if (isOldIdUnused(id))
+ continue;
+
+ // Find a new mapping for any used but unmapped IDs
+ if (isOldIdUnmapped(id)) {
+ localId(id, unusedId = nextUnusedId(unusedId));
+ if (errorLatch)
+ return;
+ }
+
+ if (isOldIdUnmapped(id)) {
+ error(std::string("old ID not mapped: ") + std::to_string(id));
+ return;
+ }
+
+ // Track max bound
+ maxBound = std::max(maxBound, localId(id) + 1);
+
+ if (errorLatch)
+ return;
+ }
+
+ bound(maxBound); // reset header ID bound to as big as it now needs to be
+ }
+
+ // Mark debug instructions for stripping
+ void spirvbin_t::stripDebug()
+ {
+ // Strip instructions in the stripOp set: debug info.
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // remember opcodes we want to strip later
+ if (isStripOp(opCode))
+ stripInst(start);
+ return true;
+ },
+ op_fn_nop);
+ }
+
+ // Mark instructions that refer to now-removed IDs for stripping
+ void spirvbin_t::stripDeadRefs()
+ {
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // strip opcodes pointing to removed data
+ switch (opCode) {
+ case spv::OpName:
+ case spv::OpMemberName:
+ case spv::OpDecorate:
+ case spv::OpMemberDecorate:
+ if (idPosR.find(asId(start+1)) == idPosR.end())
+ stripInst(start);
+ break;
+ default:
+ break; // leave it alone
+ }
+
+ return true;
+ },
+ op_fn_nop);
+
+ strip();
+ }
+
+ // Update local maps of ID, type, etc positions
+ void spirvbin_t::buildLocalMaps()
+ {
+ msg(2, 2, std::string("build local maps: "));
+
+ mapped.clear();
+ idMapL.clear();
+        // preserve nameMap, so we don't clear that.
+ fnPos.clear();
+ fnCalls.clear();
+ typeConstPos.clear();
+ idPosR.clear();
+ entryPoint = spv::NoResult;
+ largestNewId = 0;
+
+ idMapL.resize(bound(), unused);
+
+ int fnStart = 0;
+ spv::Id fnRes = spv::NoResult;
+
+ // build local Id and name maps
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ unsigned word = start+1;
+ spv::Id typeId = spv::NoResult;
+
+ if (spv::InstructionDesc[opCode].hasType())
+ typeId = asId(word++);
+
+ // If there's a result ID, remember the size of its type
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ const spv::Id resultId = asId(word++);
+ idPosR[resultId] = start;
+
+ if (typeId != spv::NoResult) {
+ const unsigned idTypeSize = typeSizeInWords(typeId);
+
+ if (errorLatch)
+ return false;
+
+ if (idTypeSize != 0)
+ idTypeSizeMap[resultId] = idTypeSize;
+ }
+ }
+
+ if (opCode == spv::Op::OpName) {
+ const spv::Id target = asId(start+1);
+ const std::string name = literalString(start+2);
+ nameMap[name] = target;
+
+ } else if (opCode == spv::Op::OpFunctionCall) {
+ ++fnCalls[asId(start + 3)];
+ } else if (opCode == spv::Op::OpEntryPoint) {
+ entryPoint = asId(start + 2);
+ } else if (opCode == spv::Op::OpFunction) {
+ if (fnStart != 0) {
+ error("nested function found");
+ return false;
+ }
+
+ fnStart = start;
+ fnRes = asId(start + 2);
+ } else if (opCode == spv::Op::OpFunctionEnd) {
+ assert(fnRes != spv::NoResult);
+ if (fnStart == 0) {
+ error("function end without function start");
+ return false;
+ }
+
+ fnPos[fnRes] = range_t(fnStart, start + asWordCount(start));
+ fnStart = 0;
+ } else if (isConstOp(opCode)) {
+ if (errorLatch)
+ return false;
+
+ assert(asId(start + 2) != spv::NoResult);
+ typeConstPos.insert(start);
+ } else if (isTypeOp(opCode)) {
+ assert(asId(start + 1) != spv::NoResult);
+ typeConstPos.insert(start);
+ }
+
+ return false;
+ },
+
+ [this](spv::Id& id) { localId(id, unmapped); }
+ );
+ }
+
+    // Validate the SPIR-V header
+ void spirvbin_t::validate() const
+ {
+ msg(2, 2, std::string("validating: "));
+
+ if (spv.size() < header_size) {
+ error("file too short: ");
+ return;
+ }
+
+ if (magic() != spv::MagicNumber) {
+ error("bad magic number");
+ return;
+ }
+
+ // field 1 = version
+ // field 2 = generator magic
+ // field 3 = result <id> bound
+
+ if (schemaNum() != 0) {
+ error("bad schema, must be 0");
+ return;
+ }
+ }
+
+ int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn)
+ {
+ const auto instructionStart = word;
+ const unsigned wordCount = asWordCount(instructionStart);
+ const int nextInst = word++ + wordCount;
+ spv::Op opCode = asOpCode(instructionStart);
+
+ if (nextInst > int(spv.size())) {
+ error("spir instruction terminated too early");
+ return -1;
+ }
+
+ // Base for computing number of operands; will be updated as more is learned
+ unsigned numOperands = wordCount - 1;
+
+ if (instFn(opCode, instructionStart))
+ return nextInst;
+
+ // Read type and result ID from instruction desc table
+ if (spv::InstructionDesc[opCode].hasType()) {
+ idFn(asId(word++));
+ --numOperands;
+ }
+
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ idFn(asId(word++));
+ --numOperands;
+ }
+
+ // Extended instructions: currently, assume everything is an ID.
+ // TODO: add whatever data we need for exceptions to that
+ if (opCode == spv::OpExtInst) {
+ word += 2; // instruction set, and instruction from set
+ numOperands -= 2;
+
+ for (unsigned op=0; op < numOperands; ++op)
+ idFn(asId(word++)); // ID
+
+ return nextInst;
+ }
+
+ // Circular buffer so we can look back at previous unmapped values during the mapping pass.
+ static const unsigned idBufferSize = 4;
+ spv::Id idBuffer[idBufferSize];
+ unsigned idBufferPos = 0;
+
+ // Store IDs from instruction in our map
+ for (int op = 0; numOperands > 0; ++op, --numOperands) {
+ // SpecConstantOp is special: it includes the operands of another opcode which is
+ // given as a literal in the 3rd word. We will switch over to pretending that the
+ // opcode being processed is the literal opcode value of the SpecConstantOp. See the
+ // SPIRV spec for details. This way we will handle IDs and literals as appropriate for
+ // the embedded op.
+ if (opCode == spv::OpSpecConstantOp) {
+ if (op == 0) {
+ opCode = asOpCode(word++); // this is the opcode embedded in the SpecConstantOp.
+ --numOperands;
+ }
+ }
+
+ switch (spv::InstructionDesc[opCode].operands.getClass(op)) {
+ case spv::OperandId:
+ case spv::OperandScope:
+ case spv::OperandMemorySemantics:
+ idBuffer[idBufferPos] = asId(word);
+ idBufferPos = (idBufferPos + 1) % idBufferSize;
+ idFn(asId(word++));
+ break;
+
+ case spv::OperandVariableIds:
+ for (unsigned i = 0; i < numOperands; ++i)
+ idFn(asId(word++));
+ return nextInst;
+
+ case spv::OperandVariableLiterals:
+ // for clarity
+ // if (opCode == spv::OpDecorate && asDecoration(word - 1) == spv::DecorationBuiltIn) {
+ // ++word;
+ // --numOperands;
+ // }
+ // word += numOperands;
+ return nextInst;
+
+ case spv::OperandVariableLiteralId: {
+ if (opCode == OpSwitch) {
+ // word-2 is the position of the selector ID. OpSwitch literals match its type.
+ // Because IDs may already have been rewritten in place during this mapping
+ // pass, we take the word[-2] ID from the circular idBuffer instead.
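+ //
+ // Illustrative layout (hypothetical operands): with a 32-bit selector,
+ // literalSize is 1, so each case is a two-word (literal, label) pair:
+ //
+ //     OpSwitch %sel %default   0 %case0   1 %case1
+ //
+ // At this point 'word' sits at the first literal, (nextInst - word)
+ // is 4, and numLiteralIdPairs is 2.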
+ const unsigned literalSizePos = (idBufferPos+idBufferSize-2) % idBufferSize;
+ const unsigned literalSize = idTypeSizeInWords(idBuffer[literalSizePos]);
+ const unsigned numLiteralIdPairs = (nextInst-word) / (1+literalSize);
+
+ if (errorLatch)
+ return -1;
+
+ for (unsigned arg=0; arg<numLiteralIdPairs; ++arg) {
+ word += literalSize; // literal
+ idFn(asId(word++)); // label
+ }
+ } else {
+ assert(0); // currently, only OpSwitch uses OperandVariableLiteralId
+ }
+
+ return nextInst;
+ }
+
+ case spv::OperandLiteralString: {
+ const int stringWordCount = literalStringWords(literalString(word));
+ word += stringWordCount;
+ numOperands -= (stringWordCount-1); // -1 because for() header post-decrements
+ break;
+ }
+
+ // Execution mode might have extra literal operands. Skip them.
+ case spv::OperandExecutionMode:
+ return nextInst;
+
+ // Single word operands we simply ignore, as they hold no IDs
+ case spv::OperandLiteralNumber:
+ case spv::OperandSource:
+ case spv::OperandExecutionModel:
+ case spv::OperandAddressing:
+ case spv::OperandMemory:
+ case spv::OperandStorage:
+ case spv::OperandDimensionality:
+ case spv::OperandSamplerAddressingMode:
+ case spv::OperandSamplerFilterMode:
+ case spv::OperandSamplerImageFormat:
+ case spv::OperandImageChannelOrder:
+ case spv::OperandImageChannelDataType:
+ case spv::OperandImageOperands:
+ case spv::OperandFPFastMath:
+ case spv::OperandFPRoundingMode:
+ case spv::OperandLinkageType:
+ case spv::OperandAccessQualifier:
+ case spv::OperandFuncParamAttr:
+ case spv::OperandDecoration:
+ case spv::OperandBuiltIn:
+ case spv::OperandSelect:
+ case spv::OperandLoop:
+ case spv::OperandFunction:
+ case spv::OperandMemoryAccess:
+ case spv::OperandGroupOperation:
+ case spv::OperandKernelEnqueueFlags:
+ case spv::OperandKernelProfilingInfo:
+ case spv::OperandCapability:
+ ++word;
+ break;
+
+ default:
+ assert(0 && "Unhandled Operand Class");
+ break;
+ }
+ }
+
+ return nextInst;
+ }
+
+ // Make a pass over all the instructions and process them given appropriate functions
+ spirvbin_t& spirvbin_t::process(instfn_t instFn, idfn_t idFn, unsigned begin, unsigned end)
+ {
+ // For efficiency, reserve name map space. It can grow if needed.
+ nameMap.reserve(32);
+
+ // If begin or end == 0, use defaults
+ begin = (begin == 0 ? header_size : begin);
+ end = (end == 0 ? unsigned(spv.size()) : end);
+
+ // basic parsing and InstructionDesc table borrowed from SpvDisassemble.cpp...
+ unsigned nextInst = unsigned(spv.size());
+
+ for (unsigned word = begin; word < end; word = nextInst) {
+ nextInst = processInstruction(word, instFn, idFn);
+
+ if (errorLatch)
+ return *this;
+ }
+
+ return *this;
+ }
+
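+ // Illustrative example (not part of the remapper) of the instFn/idFn
+ // protocol: instFn returning true claims the whole instruction and skips
+ // the default ID handling; otherwise idFn is called on every ID operand.
+ // E.g., counting loads:
+ //
+ //     unsigned loads = 0;
+ //     process(
+ //         [&](spv::Op op, unsigned) { if (op == spv::OpLoad) ++loads; return true; },
+ //         op_fn_nop);
+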
+ // Apply global name mapping to a single module
+ void spirvbin_t::mapNames()
+ {
+ static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 3019; // offset into ID space
+
+ for (const auto& name : nameMap) {
+ std::uint32_t hashval = 1911;
+ for (const char c : name.first)
+ hashval = hashval * 1009 + c;
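+ // e.g. (illustrative): "main" hashes to
+ // ((((1911*1009 + 'm')*1009 + 'a')*1009 + 'i')*1009 + 'n'), which below
+ // is folded into [firstMappedID, firstMappedID + softTypeIdLimit), so the
+ // same name yields the same candidate ID in every module.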
+
+ if (isOldIdUnmapped(name.second)) {
+ localId(name.second, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+ }
+ }
+
+ // Map fn contents to IDs of similar functions in other modules
+ void spirvbin_t::mapFnBodies()
+ {
+ static const std::uint32_t softTypeIdLimit = 19071; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 6203; // offset into ID space
+
+ // Initial approach: go through some high priority opcodes first and assign them
+ // hash values.
+
+ spv::Id fnId = spv::NoResult;
+ std::vector<unsigned> instPos;
+ instPos.reserve(unsigned(spv.size()) / 16); // initial estimate; can grow if needed.
+
+ // Build local table of instruction start positions
+ process(
+ [&](spv::Op, unsigned start) { instPos.push_back(start); return true; },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ // Window size for context-sensitive canonicalization values.
+ // Empirically the best size from a single data set. TODO: would be a good tunable.
+ // We essentially perform a small convolution around each instruction,
+ // capturing the flavor of nearby code in the hope of matching similar
+ // code in other modules.
+ static const unsigned windowSize = 2;
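+
+ // Illustrative sketch: for the instruction at index i, the hash below mixes
+ // the opcodes of instructions [i-windowSize, i-1] behind and
+ // [i, i+windowSize] ahead, stopping at OpFunction/OpFunctionEnd, so an ID's
+ // canonical value reflects its local neighborhood rather than its absolute
+ // position in the module.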
+
+ for (unsigned entry = 0; entry < unsigned(instPos.size()); ++entry) {
+ const unsigned start = instPos[entry];
+ const spv::Op opCode = asOpCode(start);
+
+ if (opCode == spv::OpFunction)
+ fnId = asId(start + 2);
+
+ if (opCode == spv::OpFunctionEnd)
+ fnId = spv::NoResult;
+
+ if (fnId != spv::NoResult) { // if inside a function
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ const unsigned word = start + (spv::InstructionDesc[opCode].hasType() ? 2 : 1);
+ const spv::Id resId = asId(word);
+ std::uint32_t hashval = fnId * 17; // small prime
+
+ for (unsigned i = entry-1; i >= entry-windowSize; --i) {
+ if (asOpCode(instPos[i]) == spv::OpFunction)
+ break;
+ hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+ }
+
+ for (unsigned i = entry; i <= entry + windowSize; ++i) {
+ if (asOpCode(instPos[i]) == spv::OpFunctionEnd)
+ break;
+ hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+ }
+
+ if (isOldIdUnmapped(resId)) {
+ localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+
+ }
+ }
+ }
+
+ spv::Op thisOpCode(spv::OpNop);
+ std::unordered_map<int, int> opCounter;
+ int idCounter(0);
+ fnId = spv::NoResult;
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ switch (opCode) {
+ case spv::OpFunction:
+ // Reset counters at each function
+ idCounter = 0;
+ opCounter.clear();
+ fnId = asId(start + 2);
+ break;
+
+ case spv::OpImageSampleImplicitLod:
+ case spv::OpImageSampleExplicitLod:
+ case spv::OpImageSampleDrefImplicitLod:
+ case spv::OpImageSampleDrefExplicitLod:
+ case spv::OpImageSampleProjImplicitLod:
+ case spv::OpImageSampleProjExplicitLod:
+ case spv::OpImageSampleProjDrefImplicitLod:
+ case spv::OpImageSampleProjDrefExplicitLod:
+ case spv::OpDot:
+ case spv::OpCompositeExtract:
+ case spv::OpCompositeInsert:
+ case spv::OpVectorShuffle:
+ case spv::OpLabel:
+ case spv::OpVariable:
+
+ case spv::OpAccessChain:
+ case spv::OpLoad:
+ case spv::OpStore:
+ case spv::OpCompositeConstruct:
+ case spv::OpFunctionCall:
+ ++opCounter[opCode];
+ idCounter = 0;
+ thisOpCode = opCode;
+ break;
+ default:
+ thisOpCode = spv::OpNop;
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) {
+ if (thisOpCode != spv::OpNop) {
+ ++idCounter;
+ const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117;
+
+ if (isOldIdUnmapped(id))
+ localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ }
+ });
+ }
+
+ // EXPERIMENTAL: forward IO and uniform load/stores into operands
+ // This produces invalid Schema-0 SPIRV
+ void spirvbin_t::forwardLoadStores()
+ {
+ idset_t fnLocalVars; // set of function local vars
+ idmap_t idMap; // Map of load result IDs to what they load
+
+ // EXPERIMENTAL: Forward input and access chain loads into consumptions
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // Add inputs and uniforms to the map
+ if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+ (spv[start+3] == spv::StorageClassUniform ||
+ spv[start+3] == spv::StorageClassUniformConstant ||
+ spv[start+3] == spv::StorageClassInput))
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpAccessChain && fnLocalVars.count(asId(start+3)) > 0)
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+ idMap[asId(start+2)] = asId(start+3);
+ stripInst(start);
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // EXPERIMENTAL: Implicit output stores
+ fnLocalVars.clear();
+ idMap.clear();
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // Add outputs to the map
+ if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+ (spv[start+3] == spv::StorageClassOutput))
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+ idMap[asId(start+2)] = asId(start+1);
+ stripInst(start);
+ }
+
+ return false;
+ },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ process(
+ inst_fn_nop,
+ [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ strip(); // strip out data we decided to eliminate
+ }
+
+ // optimize loads and stores
+ void spirvbin_t::optLoadStore()
+ {
+ idset_t fnLocalVars; // candidates for removal (only locals)
+ idmap_t idMap; // Map of load result IDs to what they load
+ blockmap_t blockMap; // Map of IDs to blocks they first appear in
+ int blockNum = 0; // block count, to avoid crossing flow control
+
+ // Find all the function local pointers stored at most once, and not via access chains
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ const int wordCount = asWordCount(start);
+
+ // Count blocks, so we can avoid crossing flow control
+ if (isFlowCtrl(opCode))
+ ++blockNum;
+
+ // Add local variables to the map
+ if ((opCode == spv::OpVariable && spv[start+3] == spv::StorageClassFunction && asWordCount(start) == 4)) {
+ fnLocalVars.insert(asId(start+2));
+ return true;
+ }
+
+ // Drop vars referenced via access chain from consideration
+ if ((opCode == spv::OpAccessChain || opCode == spv::OpInBoundsAccessChain) && fnLocalVars.count(asId(start+3)) > 0) {
+ fnLocalVars.erase(asId(start+3));
+ idMap.erase(asId(start+3));
+ return true;
+ }
+
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+ const spv::Id varId = asId(start+3);
+
+ // Avoid loads before stores
+ if (idMap.find(varId) == idMap.end()) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // don't do for volatile references
+ if (wordCount > 4 && (spv[start+4] & spv::MemoryAccessVolatileMask)) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // Handle flow control
+ if (blockMap.find(varId) == blockMap.end()) {
+ blockMap[varId] = blockNum; // track block we found it in.
+ } else if (blockMap[varId] != blockNum) {
+ fnLocalVars.erase(varId); // Ignore if crosses flow control
+ idMap.erase(varId);
+ }
+
+ return true;
+ }
+
+ if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+ const spv::Id varId = asId(start+1);
+
+ if (idMap.find(varId) == idMap.end()) {
+ idMap[varId] = asId(start+2);
+ } else {
+ // Remove if it has more than one store to the same pointer
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // don't do for volatile references
+ if (wordCount > 3 && (spv[start+3] & spv::MemoryAccessVolatileMask)) {
+ fnLocalVars.erase(asId(start+3));
+ idMap.erase(asId(start+3));
+ }
+
+ // Handle flow control
+ if (blockMap.find(varId) == blockMap.end()) {
+ blockMap[varId] = blockNum; // track block we found it in.
+ } else if (blockMap[varId] != blockNum) {
+ fnLocalVars.erase(varId); // Ignore if crosses flow control
+ idMap.erase(varId);
+ }
+
+ return true;
+ }
+
+ return false;
+ },
+
+ // If local var id used anywhere else, don't eliminate
+ [&](spv::Id& id) {
+ if (fnLocalVars.count(id) > 0) {
+ fnLocalVars.erase(id);
+ idMap.erase(id);
+ }
+ }
+ );
+
+ if (errorLatch)
+ return;
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0)
+ idMap[asId(start+2)] = idMap[asId(start+3)];
+ return false;
+ },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ // Chase replacements to their origins, in case there is a chain such as:
+ // 2 = store 1
+ // 3 = load 2
+ // 4 = store 3
+ // 5 = load 4
+ // We want to replace uses of 5 with 1.
+ for (const auto& idPair : idMap) {
+ spv::Id id = idPair.first;
+ while (idMap.find(id) != idMap.end()) // Chase to end of chain
+ id = idMap[id];
+
+ idMap[idPair.first] = id; // replace with final result
+ }
+
+ // Remove the load/store/variables for the ones we've discovered
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if ((opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) ||
+ (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) ||
+ (opCode == spv::OpVariable && fnLocalVars.count(asId(start+2)) > 0)) {
+
+ stripInst(start);
+ return true;
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) {
+ if (idMap.find(id) != idMap.end()) id = idMap[id];
+ }
+ );
+
+ if (errorLatch)
+ return;
+
+ strip(); // strip out data we decided to eliminate
+ }
+
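+ // Illustrative before/after (hypothetical IDs): for a function-local
+ // variable with exactly one store and no access-chain or volatile uses,
+ //
+ //     %v = OpVariable %_ptr_Function_float Function
+ //          OpStore %v %x
+ //     %y = OpLoad %float %v
+ //
+ // the passes above map %y to %x, so the variable, the store, and the load
+ // are all stripped and later uses of %y become uses of %x.
+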
+ // remove bodies of uncalled functions
+ void spirvbin_t::dceFuncs()
+ {
+ msg(3, 2, std::string("Removing Dead Functions: "));
+
+ // TODO: There are more efficient ways to do this.
+ bool changed = true;
+
+ while (changed) {
+ changed = false;
+
+ for (auto fn = fnPos.begin(); fn != fnPos.end(); ) {
+ if (fn->first == entryPoint) { // don't DCE away the entry point!
+ ++fn;
+ continue;
+ }
+
+ const auto call_it = fnCalls.find(fn->first);
+
+ if (call_it == fnCalls.end() || call_it->second == 0) {
+ changed = true;
+ stripRange.push_back(fn->second);
+
+ // decrease counts of called functions
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::Op::OpFunctionCall) {
+ const auto call_it = fnCalls.find(asId(start + 3));
+ if (call_it != fnCalls.end()) {
+ if (--call_it->second <= 0)
+ fnCalls.erase(call_it);
+ }
+ }
+
+ return true;
+ },
+ op_fn_nop,
+ fn->second.first,
+ fn->second.second);
+
+ if (errorLatch)
+ return;
+
+ fn = fnPos.erase(fn);
+ } else ++fn;
+ }
+ }
+ }
+
+ // remove unused function variables + decorations
+ void spirvbin_t::dceVars()
+ {
+ msg(3, 2, std::string("DCE Vars: "));
+
+ std::unordered_map<spv::Id, int> varUseCount;
+
+ // Count function variable use
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::OpVariable) {
+ ++varUseCount[asId(start+2)];
+ return true;
+ } else if (opCode == spv::OpEntryPoint) {
+ const int wordCount = asWordCount(start);
+ for (int i = 4; i < wordCount; i++) {
+ ++varUseCount[asId(start+i)];
+ }
+ return true;
+ } else
+ return false;
+ },
+
+ [&](spv::Id& id) { if (varUseCount[id]) ++varUseCount[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // Remove single-use function variables + associated decorations and names
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ spv::Id id = spv::NoResult;
+ if (opCode == spv::OpVariable)
+ id = asId(start+2);
+ if (opCode == spv::OpDecorate || opCode == spv::OpName)
+ id = asId(start+1);
+
+ if (id != spv::NoResult && varUseCount[id] == 1)
+ stripInst(start);
+
+ return true;
+ },
+ op_fn_nop);
+ }
+
+ // remove unused types
+ void spirvbin_t::dceTypes()
+ {
+ std::vector<bool> isType(bound(), false);
+
+ // for speed, make an O(1) way to do type queries (the ordered set is O(log n))
+ for (const auto typeStart : typeConstPos)
+ isType[asTypeConstId(typeStart)] = true;
+
+ std::unordered_map<spv::Id, int> typeUseCount;
+
+ // This is not the most efficient algorithm, but this is an offline tool, and
+ // it's easy to write this way. Can be improved opportunistically if needed.
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ strip();
+ typeUseCount.clear();
+
+ // Count total type usage
+ process(inst_fn_nop,
+ [&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // Remove single reference types
+ for (const auto typeStart : typeConstPos) {
+ const spv::Id typeId = asTypeConstId(typeStart);
+ if (typeUseCount[typeId] == 1) {
+ changed = true;
+ --typeUseCount[typeId];
+ stripInst(typeStart);
+ }
+ }
+
+ if (errorLatch)
+ return;
+ }
+ }
+
+#ifdef NOTDEF
+ bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const
+ {
+ // Find the local type id "lt" and global type id "gt"
+ const auto lt_it = typeConstPosR.find(lt);
+ if (lt_it == typeConstPosR.end())
+ return false;
+
+ const auto typeStart = lt_it->second;
+
+ // Search for entry in global table
+ const auto gtype = globalTypes.find(gt);
+ if (gtype == globalTypes.end())
+ return false;
+
+ const auto& gdata = gtype->second;
+
+ // local wordcount and opcode
+ const int wordCount = asWordCount(typeStart);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ // no type match if opcodes don't match, or operand count doesn't match
+ if (opCode != opOpCode(gdata[0]) || wordCount != opWordCount(gdata[0]))
+ return false;
+
+ const unsigned numOperands = wordCount - 2; // all types have a result
+
+ const auto cmpIdRange = [&](range_t range) {
+ for (int x=range.first; x<std::min(range.second, wordCount); ++x)
+ if (!matchType(globalTypes, asId(typeStart+x), gdata[x]))
+ return false;
+ return true;
+ };
+
+ const auto cmpConst = [&]() { return cmpIdRange(constRange(opCode)); };
+ const auto cmpSubType = [&]() { return cmpIdRange(typeRange(opCode)); };
+
+ // Compare literals in range [start,end)
+ const auto cmpLiteral = [&]() {
+ const auto range = literalRange(opCode);
+ return std::equal(spir.begin() + typeStart + range.first,
+ spir.begin() + typeStart + std::min(range.second, wordCount),
+ gdata.begin() + range.first);
+ };
+
+ assert(isTypeOp(opCode) || isConstOp(opCode));
+
+ switch (opCode) {
+ case spv::OpTypeOpaque: // TODO: disable until we compare the literal strings.
+ case spv::OpTypeQueue: return false;
+ case spv::OpTypeEvent: // fall through...
+ case spv::OpTypeDeviceEvent: // ...
+ case spv::OpTypeReserveId: return false;
+ // for samplers, we don't handle the optional parameters yet
+ case spv::OpTypeSampler: return cmpLiteral() && cmpConst() && cmpSubType() && wordCount == 8;
+ default: return cmpLiteral() && cmpConst() && cmpSubType();
+ }
+ }
+
+ // Look for an equivalent type in the globalTypes map
+ spv::Id spirvbin_t::findType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt) const
+ {
+ // Try a recursive type match on each in turn, and return a match if we find one
+ for (const auto& gt : globalTypes)
+ if (matchType(globalTypes, lt, gt.first))
+ return gt.first;
+
+ return spv::NoType;
+ }
+#endif // NOTDEF
+
+ // Return start position in SPV of given Id; error if not found.
+ unsigned spirvbin_t::idPos(spv::Id id) const
+ {
+ const auto tid_it = idPosR.find(id);
+ if (tid_it == idPosR.end()) {
+ error("ID not found");
+ return 0;
+ }
+
+ return tid_it->second;
+ }
+
+ // Hash types to canonical values. Collisions are possible (somewhat
+ // inevitable), so it's up to the caller to handle them gracefully.
+ std::uint32_t spirvbin_t::hashType(unsigned typeStart) const
+ {
+ const unsigned wordCount = asWordCount(typeStart);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ switch (opCode) {
+ case spv::OpTypeVoid: return 0;
+ case spv::OpTypeBool: return 1;
+ case spv::OpTypeInt: return 3 + (spv[typeStart+3]);
+ case spv::OpTypeFloat: return 5;
+ case spv::OpTypeVector:
+ return 6 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+ case spv::OpTypeMatrix:
+ return 30 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+ case spv::OpTypeImage:
+ return 120 + hashType(idPos(spv[typeStart+2])) +
+ spv[typeStart+3] + // dimensionality
+ spv[typeStart+4] * 8 * 16 + // depth
+ spv[typeStart+5] * 4 * 16 + // arrayed
+ spv[typeStart+6] * 2 * 16 + // multisampled
+ spv[typeStart+7] * 1 * 16; // format
+ case spv::OpTypeSampler:
+ return 500;
+ case spv::OpTypeSampledImage:
+ return 502;
+ case spv::OpTypeArray:
+ return 501 + hashType(idPos(spv[typeStart+2])) * spv[typeStart+3];
+ case spv::OpTypeRuntimeArray:
+ return 5000 + hashType(idPos(spv[typeStart+2]));
+ case spv::OpTypeStruct:
+ {
+ std::uint32_t hash = 10000;
+ for (unsigned w=2; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+
+ case spv::OpTypeOpaque: return 6000 + spv[typeStart+2];
+ case spv::OpTypePointer: return 100000 + hashType(idPos(spv[typeStart+3]));
+ case spv::OpTypeFunction:
+ {
+ std::uint32_t hash = 200000;
+ for (unsigned w=2; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+
+ case spv::OpTypeEvent: return 300000;
+ case spv::OpTypeDeviceEvent: return 300001;
+ case spv::OpTypeReserveId: return 300002;
+ case spv::OpTypeQueue: return 300003;
+ case spv::OpTypePipe: return 300004;
+ case spv::OpConstantTrue: return 300007;
+ case spv::OpConstantFalse: return 300008;
+ case spv::OpConstantComposite:
+ {
+ std::uint32_t hash = 300011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+ case spv::OpConstant:
+ {
+ std::uint32_t hash = 400011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * spv[typeStart+w];
+ return hash;
+ }
+ case spv::OpConstantNull:
+ {
+ std::uint32_t hash = 500009 + hashType(idPos(spv[typeStart+1]));
+ return hash;
+ }
+ case spv::OpConstantSampler:
+ {
+ std::uint32_t hash = 600011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * spv[typeStart+w];
+ return hash;
+ }
+
+ default:
+ error("unknown type opcode");
+ return 0;
+ }
+ }
+
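+ // Worked example (illustrative): a signed OpTypeInt hashes to 3 + 1 = 4
+ // (only the signedness word feeds in), and a 4-component OpTypeVector of
+ // it hashes to 6 + 4 * (4 - 1) = 18. Distinct types can still collide;
+ // mapTypeConst() below resolves collisions via nextUnusedId().
+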
+ void spirvbin_t::mapTypeConst()
+ {
+ globaltypes_t globalTypeMap;
+
+ msg(3, 2, std::string("Remapping Consts & Types: "));
+
+ static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 8; // offset into ID space
+
+ for (auto& typeStart : typeConstPos) {
+ const spv::Id resId = asTypeConstId(typeStart);
+ const std::uint32_t hashval = hashType(typeStart);
+
+ if (errorLatch)
+ return;
+
+ if (isOldIdUnmapped(resId)) {
+ localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+ }
+ }
+
+ // Strip a single binary by removing ranges given in stripRange
+ void spirvbin_t::strip()
+ {
+ if (stripRange.empty()) // nothing to do
+ return;
+
+ // Sort strip ranges in order of traversal
+ std::sort(stripRange.begin(), stripRange.end());
+
+ // Compact the binary in place, skipping the strip ranges.
+ // We'll step this iterator through the strip ranges as we walk the binary
+ auto strip_it = stripRange.begin();
+
+ int strippedPos = 0;
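+
+ // Illustrative trace (hypothetical words): with spv = [A B C D E] and a
+ // single strip range [1,3), the loop below copies A, skips B and C, then
+ // copies D and E, leaving [A D E].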
+ for (unsigned word = 0; word < unsigned(spv.size()); ++word) {
+ while (strip_it != stripRange.end() && word >= strip_it->second)
+ ++strip_it;
+
+ if (strip_it == stripRange.end() || word < strip_it->first || word >= strip_it->second)
+ spv[strippedPos++] = spv[word];
+ }
+
+ spv.resize(strippedPos);
+ stripRange.clear();
+
+ buildLocalMaps();
+ }
+
+ // Remap a single binary in place, according to the given options
+ void spirvbin_t::remap(std::uint32_t opts)
+ {
+ options = opts;
+
+ // Set up opcode tables from SpvDoc
+ spv::Parameterize();
+
+ validate(); // validate header
+ buildLocalMaps(); // build ID maps
+
+ msg(3, 4, std::string("ID bound: ") + std::to_string(bound()));
+
+ if (options & STRIP) stripDebug();
+ if (errorLatch) return;
+
+ strip(); // strip out data we decided to eliminate
+ if (errorLatch) return;
+
+ if (options & OPT_LOADSTORE) optLoadStore();
+ if (errorLatch) return;
+
+ if (options & OPT_FWD_LS) forwardLoadStores();
+ if (errorLatch) return;
+
+ if (options & DCE_FUNCS) dceFuncs();
+ if (errorLatch) return;
+
+ if (options & DCE_VARS) dceVars();
+ if (errorLatch) return;
+
+ if (options & DCE_TYPES) dceTypes();
+ if (errorLatch) return;
+
+ strip(); // strip out data we decided to eliminate
+ if (errorLatch) return;
+
+ stripDeadRefs(); // remove references to things we DCEed
+ if (errorLatch) return;
+
+ // after the last strip, we must clean any debug info referring to now-deleted data
+
+ if (options & MAP_TYPES) mapTypeConst();
+ if (errorLatch) return;
+
+ if (options & MAP_NAMES) mapNames();
+ if (errorLatch) return;
+
+ if (options & MAP_FUNCS) mapFnBodies();
+ if (errorLatch) return;
+
+ if (options & MAP_ALL) {
+ mapRemainder(); // map any unmapped IDs
+ if (errorLatch) return;
+
+ applyMap(); // Now remap each shader to the new IDs we've come up with
+ if (errorLatch) return;
+ }
+ }
+
+ // remap from a memory image
+ void spirvbin_t::remap(std::vector<std::uint32_t>& in_spv, std::uint32_t opts)
+ {
+ spv.swap(in_spv);
+ remap(opts);
+ spv.swap(in_spv);
+ }
+
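+ // Example client usage (illustrative; loadWords() is a hypothetical helper
+ // that reads a module into 32-bit words):
+ //
+ //     std::vector<std::uint32_t> words = loadWords("shader.spv");
+ //     spv::spirvbin_t binary;
+ //     binary.remap(words, spv::spirvbin_t::DO_EVERYTHING);
+ //     // 'words' now holds the stripped, DCEd, remapped module.
+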
+} // namespace spv
+
+#endif // defined (use_cpp11)
+
diff --git a/src/3rdparty/glslang/SPIRV/SPVRemapper.h b/src/3rdparty/glslang/SPIRV/SPVRemapper.h
new file mode 100644
index 0000000..97e3f31
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SPVRemapper.h
@@ -0,0 +1,304 @@
+//
+// Copyright (C) 2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef SPIRVREMAPPER_H
+#define SPIRVREMAPPER_H
+
+#include <string>
+#include <vector>
+#include <cstdlib>
+#include <exception>
+
+namespace spv {
+
+// MSVC defines __cplusplus as an older value, even when it supports almost all of C++11.
+// We handle that here by making our own symbol.
+#if __cplusplus >= 201103L || _MSC_VER >= 1700
+# define use_cpp11 1
+#endif
+
+class spirvbin_base_t
+{
+public:
+ enum Options {
+ NONE = 0,
+ STRIP = (1<<0),
+ MAP_TYPES = (1<<1),
+ MAP_NAMES = (1<<2),
+ MAP_FUNCS = (1<<3),
+ DCE_FUNCS = (1<<4),
+ DCE_VARS = (1<<5),
+ DCE_TYPES = (1<<6),
+ OPT_LOADSTORE = (1<<7),
+ OPT_FWD_LS = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
+ MAP_ALL = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
+ DCE_ALL = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
+ OPT_ALL = (OPT_LOADSTORE),
+
+ ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
+ DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
+ };
+};
+
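+// The Options bits are meant to be OR'd together, e.g. (illustrative):
+//
+//     const std::uint32_t opts = spv::spirvbin_base_t::STRIP
+//                              | spv::spirvbin_base_t::DCE_ALL
+//                              | spv::spirvbin_base_t::MAP_ALL;
+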
+} // namespace spv
+
+#if !defined (use_cpp11)
+#include <cstdio>
+#include <cstdint>
+
+namespace spv {
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+ spirvbin_t(int /*verbose = 0*/) { }
+
+ void remap(std::vector<std::uint32_t>& /*spv*/, unsigned int /*opts = 0*/)
+ {
+ printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n");
+ exit(5);
+ }
+};
+
+} // namespace spv
+
+#else // defined (use_cpp11)
+
+#include <functional>
+#include <cstdint>
+#include <unordered_map>
+#include <unordered_set>
+#include <map>
+#include <set>
+#include <cassert>
+
+#include "spirv.hpp"
+#include "spvIR.h"
+
+namespace spv {
+
+// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+ spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
+ { }
+
+ virtual ~spirvbin_t() { }
+
+ // remap on an existing binary in memory
+ void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
+
+ // Type for error/log handler functions
+ typedef std::function<void(const std::string&)> errorfn_t;
+ typedef std::function<void(const std::string&)> logfn_t;
+
+ // Register error/log handling functions (can be lambda fn / functor / etc)
+ static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
+ static void registerLogHandler(logfn_t handler) { logHandler = handler; }
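+
+ // Example (illustrative, assumes <iostream>): route remapper errors to
+ // stderr with a lambda:
+ //
+ //     spv::spirvbin_t::registerErrorHandler([](const std::string& msg)
+ //         { std::cerr << "SPIR-V remap: " << msg << std::endl; });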
+
+protected:
+ // This can be overridden to provide other message behavior if needed
+ virtual void msg(int minVerbosity, int indent, const std::string& txt) const;
+
+private:
+ // Local to global, or global to local ID map
+ typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
+ typedef std::unordered_set<spv::Id> idset_t;
+ typedef std::unordered_map<spv::Id, int> blockmap_t;
+
+ void remap(std::uint32_t opts = DO_EVERYTHING);
+
+ // Map of names to IDs
+ typedef std::unordered_map<std::string, spv::Id> namemap_t;
+
+ typedef std::uint32_t spirword_t;
+
+ typedef std::pair<unsigned, unsigned> range_t;
+ typedef std::function<void(spv::Id&)> idfn_t;
+ typedef std::function<bool(spv::Op, unsigned start)> instfn_t;
+
+ // Special Values for ID map:
+ static const spv::Id unmapped; // unchanged from default value
+ static const spv::Id unused; // unused ID
+ static const int header_size; // SPIR header = 5 words
+
+ class id_iterator_t;
+
+ // For mapping type entries between different shaders
+ typedef std::vector<spirword_t> typeentry_t;
+ typedef std::map<spv::Id, typeentry_t> globaltypes_t;
+
+ // A set that preserves position order, and a reverse map
+ typedef std::set<int> posmap_t;
+ typedef std::unordered_map<spv::Id, int> posmap_rev_t;
+
+ // Maps an ID to the size of its base type, if known.
+ typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
+
+ // handle error
+ void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }
+
+ bool isConstOp(spv::Op opCode) const;
+ bool isTypeOp(spv::Op opCode) const;
+ bool isStripOp(spv::Op opCode) const;
+ bool isFlowCtrl(spv::Op opCode) const;
+ range_t literalRange(spv::Op opCode) const;
+ range_t typeRange(spv::Op opCode) const;
+ range_t constRange(spv::Op opCode) const;
+ unsigned typeSizeInWords(spv::Id id) const;
+ unsigned idTypeSizeInWords(spv::Id id) const;
+
+ spv::Id& asId(unsigned word) { return spv[word]; }
+ const spv::Id& asId(unsigned word) const { return spv[word]; }
+ spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
+ std::uint32_t asOpCodeHash(unsigned word);
+ spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
+ unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
+ spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
+ unsigned idPos(spv::Id id) const;
+
+ static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
+ static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
+
+ // Header access & set methods
+ spirword_t magic() const { return spv[0]; } // return magic number
+ spirword_t bound() const { return spv[3]; } // return Id bound from header
+ spirword_t bound(spirword_t b) { return spv[3] = b; };
+ spirword_t genmagic() const { return spv[2]; } // generator magic
+ spirword_t genmagic(spirword_t m) { return spv[2] = m; }
+ spirword_t schemaNum() const { return spv[4]; } // schema number from header
+
+ // Mapping fns: get
+ spv::Id localId(spv::Id id) const { return idMapL[id]; }
+
+ // Mapping fns: set
+ inline spv::Id localId(spv::Id id, spv::Id newId);
+ void countIds(spv::Id id);
+
+ // Return next unused new local ID.
+ // NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
+ // which std::vector<bool> doesn't have.
+ inline spv::Id nextUnusedId(spv::Id id);
+
+ void buildLocalMaps();
+ std::string literalString(unsigned word) const; // Return literal as a std::string
+ int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }
+
+ bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); }
+ bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
+ bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; }
+ bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
+ bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); }
+
+ // bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
+ // spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
+ std::uint32_t hashType(unsigned typeStart) const;
+
+ spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
+ int processInstruction(unsigned word, instfn_t, idfn_t);
+
+ void validate() const;
+ void mapTypeConst();
+ void mapFnBodies();
+ void optLoadStore();
+ void dceFuncs();
+ void dceVars();
+ void dceTypes();
+ void mapNames();
+ void foldIds(); // fold IDs to smallest space
+ void forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
+ void offsetIds(); // create relative offset IDs
+
+ void applyMap(); // remap per local name map
+ void mapRemainder(); // map any IDs we haven't touched yet
+ void stripDebug(); // strip all debug info
+ void stripDeadRefs(); // strips debug info for now-dead references after DCE
+ void strip(); // remove the ranges queued in stripRange from the binary
+
+ std::vector<spirword_t> spv; // SPIR words
+
+ namemap_t nameMap; // ID names from OpName
+
+ // Since we also want to do binary ops, we can't use std::vector<bool>. We could use
+ // boost::dynamic_bitset, but we're trying to avoid a Boost dependency.
+ typedef std::uint64_t bits_t;
+ std::vector<bits_t> mapped; // which new IDs have been mapped
+ static const int mBits = sizeof(bits_t) * 4;
+
+ bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
+ void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
+ void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
+ size_t maxMappedId() const { return mapped.size() * mBits; }
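+
+ // For example (illustrative): with bits_t = std::uint64_t, mBits is
+ // sizeof(bits_t) * 4 = 32, so only the low 32 bits of each word are used
+ // and ID 70 lives at mapped[70/32 = 2], bit 70%32 = 6.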
+
+ // Add a strip range for a given instruction starting at 'start'
+ // Note: avoiding brace initializers to please older versions of MSVC.
+ void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }
+
+ // Function start and end. Use unordered_map because we'll have
+ // many fewer functions than IDs.
+ std::unordered_map<spv::Id, range_t> fnPos;
+
+ // Which functions are called, anywhere in the module, with a call count
+ std::unordered_map<spv::Id, int> fnCalls;
+
+ posmap_t typeConstPos; // word positions that define types & consts (ordered)
+ posmap_rev_t idPosR; // reverse map from IDs to positions
+ typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
+
+ std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
+
+ spv::Id entryPoint; // module entry point
+ spv::Id largestNewId; // biggest new ID we have mapped anything to
+
+ // Sections of the binary to strip, given as [begin,end)
+ std::vector<range_t> stripRange;
+
+ // processing options:
+ std::uint32_t options;
+ int verbose; // verbosity level
+
+ // Error latch: this is set if the error handler is ever executed. It would be better to
+ // use a try/catch block and throw, but that's not desired for certain environments, so
+ // this is the alternative.
+ mutable bool errorLatch;
+
+ static errorfn_t errorHandler;
+ static logfn_t logHandler;
+};
+
+} // namespace spv
+
+#endif // defined (use_cpp11)
+#endif // SPIRVREMAPPER_H
diff --git a/src/3rdparty/glslang/SPIRV/SpvBuilder.cpp b/src/3rdparty/glslang/SPIRV/SpvBuilder.cpp
new file mode 100644
index 0000000..138c41c
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SpvBuilder.cpp
@@ -0,0 +1,3048 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Helper for making SPIR-V IR. Generally, this is documented in the header
+// SpvBuilder.h.
+//
+
+#include <cassert>
+#include <cstdlib>
+
+#include <unordered_set>
+#include <algorithm>
+
+#include "SpvBuilder.h"
+
+#include "hex_float.h"
+
+#ifndef _WIN32
+ #include <cstdio>
+#endif
+
+namespace spv {
+
+Builder::Builder(unsigned int spvVersion, unsigned int magicNumber, SpvBuildLogger* buildLogger) :
+ spvVersion(spvVersion),
+ source(SourceLanguageUnknown),
+ sourceVersion(0),
+ sourceFileStringId(NoResult),
+ currentLine(0),
+ currentFile(nullptr),
+ emitOpLines(false),
+ addressModel(AddressingModelLogical),
+ memoryModel(MemoryModelGLSL450),
+ builderNumber(magicNumber),
+ buildPoint(0),
+ uniqueId(0),
+ entryPointFunction(0),
+ generatingOpCodeForSpecConst(false),
+ logger(buildLogger)
+{
+ clearAccessChain();
+}
+
+Builder::~Builder()
+{
+}
+
+Id Builder::import(const char* name)
+{
+ Instruction* import = new Instruction(getUniqueId(), NoType, OpExtInstImport);
+ import->addStringOperand(name);
+ module.mapInstruction(import);
+
+ imports.push_back(std::unique_ptr<Instruction>(import));
+ return import->getResultId();
+}
+
+// Emit instruction for non-filename-based #line directives (i.e. no filename
+// seen yet): emit an OpLine if we've been asked to emit OpLines and the line
+// number has changed since the last time, and is a valid line number.
+void Builder::setLine(int lineNum)
+{
+ if (lineNum != 0 && lineNum != currentLine) {
+ currentLine = lineNum;
+ if (emitOpLines)
+ addLine(sourceFileStringId, currentLine, 0);
+ }
+}
+
+// If no filename, do non-filename-based #line emit. Else do filename-based emit.
+// Emit OpLine if we've been asked to emit OpLines and the line number or filename
+// has changed since the last time, and line number is valid.
+void Builder::setLine(int lineNum, const char* filename)
+{
+ if (filename == nullptr) {
+ setLine(lineNum);
+ return;
+ }
+ if ((lineNum != 0 && lineNum != currentLine) || currentFile == nullptr ||
+ strncmp(filename, currentFile, strlen(currentFile) + 1) != 0) {
+ currentLine = lineNum;
+ currentFile = filename;
+ if (emitOpLines) {
+ spv::Id strId = getStringId(filename);
+ addLine(strId, currentLine, 0);
+ }
+ }
+}
+
+void Builder::addLine(Id fileName, int lineNum, int column)
+{
+ Instruction* line = new Instruction(OpLine);
+ line->addIdOperand(fileName);
+ line->addImmediateOperand(lineNum);
+ line->addImmediateOperand(column);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(line));
+}
+
+// For creating new groupedTypes (will return old type if the requested one was already made).
+Id Builder::makeVoidType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeVoid].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeVoid);
+ groupedTypes[OpTypeVoid].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeVoid].back();
+
+ return type->getResultId();
+}
+
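+// Most of the type builders below follow the same find-or-make pattern
+// (descriptive sketch): scan the per-opcode bucket in groupedTypes for a
+// structurally identical instruction and return its existing result ID;
+// otherwise create the instruction, append it to the bucket and to
+// constantsTypesGlobals, and register it with the module.
+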
+Id Builder::makeBoolType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeBool].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeBool);
+ groupedTypes[OpTypeBool].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeBool].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makeSamplerType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeSampler].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeSampler);
+ groupedTypes[OpTypeSampler].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeSampler].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makePointer(StorageClass storageClass, Id pointee)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)storageClass &&
+ type->getIdOperand(1) == pointee)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypePointer);
+ type->addImmediateOperand(storageClass);
+ type->addIdOperand(pointee);
+ groupedTypes[OpTypePointer].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeForwardPointer(StorageClass storageClass)
+{
+ // Caching/uniquifying doesn't work here, because we don't know the
+ // pointee type and there can be multiple forward pointers of the same
+ // storage type. Somebody higher up in the stack must keep track.
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeForwardPointer);
+ type->addImmediateOperand(storageClass);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardPointerType, Id pointee)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)storageClass &&
+ type->getIdOperand(1) == pointee)
+ return type->getResultId();
+ }
+
+ type = new Instruction(forwardPointerType, NoType, OpTypePointer);
+ type->addImmediateOperand(storageClass);
+ type->addIdOperand(pointee);
+ groupedTypes[OpTypePointer].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeIntegerType(int width, bool hasSign)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
+ type = groupedTypes[OpTypeInt][t];
+ if (type->getImmediateOperand(0) == (unsigned)width &&
+ type->getImmediateOperand(1) == (hasSign ? 1u : 0u))
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeInt);
+ type->addImmediateOperand(width);
+ type->addImmediateOperand(hasSign ? 1 : 0);
+ groupedTypes[OpTypeInt].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (width) {
+ case 8:
+ case 16:
+ // these are currently handled by storage-type declarations and post processing
+ break;
+ case 64:
+ addCapability(CapabilityInt64);
+ break;
+ default:
+ break;
+ }
+
+ return type->getResultId();
+}
+
+Id Builder::makeFloatType(int width)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
+ type = groupedTypes[OpTypeFloat][t];
+ if (type->getImmediateOperand(0) == (unsigned)width)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeFloat);
+ type->addImmediateOperand(width);
+ groupedTypes[OpTypeFloat].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (width) {
+ case 16:
+ // currently handled by storage-type declarations and post processing
+ break;
+ case 64:
+ addCapability(CapabilityFloat64);
+ break;
+ default:
+ break;
+ }
+
+ return type->getResultId();
+}
+
+// Make a struct without checking for duplication.
+// See makeStructResultType() for the non-decorated structs
+// needed as the result of some instructions; that function
+// does check for duplicates.
+Id Builder::makeStructType(const std::vector<Id>& members, const char* name)
+{
+ // Don't look for previous one, because in the general case,
+ // structs can be duplicated except for decorations.
+
+ // not found, make it
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeStruct);
+ for (int op = 0; op < (int)members.size(); ++op)
+ type->addIdOperand(members[op]);
+ groupedTypes[OpTypeStruct].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ addName(type->getResultId(), name);
+
+ return type->getResultId();
+}
+
+// Make a struct for the simple results of several instructions,
+// checking for duplication.
+Id Builder::makeStructResultType(Id type0, Id type1)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeStruct].size(); ++t) {
+ type = groupedTypes[OpTypeStruct][t];
+ if (type->getNumOperands() != 2)
+ continue;
+ if (type->getIdOperand(0) != type0 ||
+ type->getIdOperand(1) != type1)
+ continue;
+ return type->getResultId();
+ }
+
+ // not found, make it
+ std::vector<spv::Id> members;
+ members.push_back(type0);
+ members.push_back(type1);
+
+ return makeStructType(members, "ResType");
+}
+
+Id Builder::makeVectorType(Id component, int size)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeVector].size(); ++t) {
+ type = groupedTypes[OpTypeVector][t];
+ if (type->getIdOperand(0) == component &&
+ type->getImmediateOperand(1) == (unsigned)size)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeVector);
+ type->addIdOperand(component);
+ type->addImmediateOperand(size);
+ groupedTypes[OpTypeVector].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeMatrixType(Id component, int cols, int rows)
+{
+ assert(cols <= maxMatrixSize && rows <= maxMatrixSize);
+
+ Id column = makeVectorType(component, rows);
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeMatrix].size(); ++t) {
+ type = groupedTypes[OpTypeMatrix][t];
+ if (type->getIdOperand(0) == column &&
+ type->getImmediateOperand(1) == (unsigned)cols)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeMatrix);
+ type->addIdOperand(column);
+ type->addImmediateOperand(cols);
+ groupedTypes[OpTypeMatrix].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeCooperativeMatrixNV].size(); ++t) {
+ type = groupedTypes[OpTypeCooperativeMatrixNV][t];
+ if (type->getIdOperand(0) == component &&
+ type->getIdOperand(1) == scope &&
+ type->getIdOperand(2) == rows &&
+ type->getIdOperand(3) == cols)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeCooperativeMatrixNV);
+ type->addIdOperand(component);
+ type->addIdOperand(scope);
+ type->addIdOperand(rows);
+ type->addIdOperand(cols);
+ groupedTypes[OpTypeCooperativeMatrixNV].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+// TODO: performance: track arrays per stride
+// If a stride is supplied (non-zero), always make a new array type.
+// If no stride (0), reuse a previous array type when possible.
+// 'sizeId' is an Id of a constant or specialization constant holding the array size.
+Id Builder::makeArrayType(Id element, Id sizeId, int stride)
+{
+ Instruction* type;
+ if (stride == 0) {
+ // try to find existing type
+ for (int t = 0; t < (int)groupedTypes[OpTypeArray].size(); ++t) {
+ type = groupedTypes[OpTypeArray][t];
+ if (type->getIdOperand(0) == element &&
+ type->getIdOperand(1) == sizeId)
+ return type->getResultId();
+ }
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeArray);
+ type->addIdOperand(element);
+ type->addIdOperand(sizeId);
+ groupedTypes[OpTypeArray].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeRuntimeArray(Id element)
+{
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeRuntimeArray);
+ type->addIdOperand(element);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeFunctionType(Id returnType, const std::vector<Id>& paramTypes)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeFunction].size(); ++t) {
+ type = groupedTypes[OpTypeFunction][t];
+ if (type->getIdOperand(0) != returnType || (int)paramTypes.size() != type->getNumOperands() - 1)
+ continue;
+ bool mismatch = false;
+ for (int p = 0; p < (int)paramTypes.size(); ++p) {
+ if (paramTypes[p] != type->getIdOperand(p + 1)) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeFunction);
+ type->addIdOperand(returnType);
+ for (int p = 0; p < (int)paramTypes.size(); ++p)
+ type->addIdOperand(paramTypes[p]);
+ groupedTypes[OpTypeFunction].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format)
+{
+ assert(sampled == 1 || sampled == 2);
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeImage].size(); ++t) {
+ type = groupedTypes[OpTypeImage][t];
+ if (type->getIdOperand(0) == sampledType &&
+ type->getImmediateOperand(1) == (unsigned int)dim &&
+ type->getImmediateOperand(2) == ( depth ? 1u : 0u) &&
+ type->getImmediateOperand(3) == (arrayed ? 1u : 0u) &&
+ type->getImmediateOperand(4) == ( ms ? 1u : 0u) &&
+ type->getImmediateOperand(5) == sampled &&
+ type->getImmediateOperand(6) == (unsigned int)format)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeImage);
+ type->addIdOperand(sampledType);
+ type->addImmediateOperand( dim);
+ type->addImmediateOperand( depth ? 1 : 0);
+ type->addImmediateOperand(arrayed ? 1 : 0);
+ type->addImmediateOperand( ms ? 1 : 0);
+ type->addImmediateOperand(sampled);
+ type->addImmediateOperand((unsigned int)format);
+
+ groupedTypes[OpTypeImage].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (dim) {
+ case DimBuffer:
+ if (sampled == 1)
+ addCapability(CapabilitySampledBuffer);
+ else
+ addCapability(CapabilityImageBuffer);
+ break;
+ case Dim1D:
+ if (sampled == 1)
+ addCapability(CapabilitySampled1D);
+ else
+ addCapability(CapabilityImage1D);
+ break;
+ case DimCube:
+ if (arrayed) {
+ if (sampled == 1)
+ addCapability(CapabilitySampledCubeArray);
+ else
+ addCapability(CapabilityImageCubeArray);
+ }
+ break;
+ case DimRect:
+ if (sampled == 1)
+ addCapability(CapabilitySampledRect);
+ else
+ addCapability(CapabilityImageRect);
+ break;
+ case DimSubpassData:
+ addCapability(CapabilityInputAttachment);
+ break;
+ default:
+ break;
+ }
+
+ if (ms) {
+ if (sampled == 2) {
+ // Images used with subpass data are not storage
+ // images, so don't require the capability for them.
+ if (dim != Dim::DimSubpassData)
+ addCapability(CapabilityStorageImageMultisample);
+ if (arrayed)
+ addCapability(CapabilityImageMSArray);
+ }
+ }
+
+ return type->getResultId();
+}
+
+Id Builder::makeSampledImageType(Id imageType)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeSampledImage].size(); ++t) {
+ type = groupedTypes[OpTypeSampledImage][t];
+ if (type->getIdOperand(0) == imageType)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeSampledImage);
+ type->addIdOperand(imageType);
+
+ groupedTypes[OpTypeSampledImage].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+#ifdef NV_EXTENSIONS
+Id Builder::makeAccelerationStructureNVType()
+{
+ Instruction *type;
+ if (groupedTypes[OpTypeAccelerationStructureNV].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeAccelerationStructureNV);
+ groupedTypes[OpTypeAccelerationStructureNV].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else {
+ type = groupedTypes[OpTypeAccelerationStructureNV].back();
+ }
+
+ return type->getResultId();
+}
+#endif
+
+Id Builder::getDerefTypeId(Id resultId) const
+{
+ Id typeId = getTypeId(resultId);
+ assert(isPointerType(typeId));
+
+ return module.getInstruction(typeId)->getIdOperand(1);
+}
+
+Op Builder::getMostBasicTypeClass(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ return getMostBasicTypeClass(instr->getIdOperand(0));
+ case OpTypePointer:
+ return getMostBasicTypeClass(instr->getIdOperand(1));
+ default:
+ return typeClass;
+ }
+}
+
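+// Illustrative note (exposition only, not upstream code): for a pointer to a vec4,
+// getMostBasicTypeClass() drills through OpTypePointer -> OpTypeVector and reports
+// OpTypeFloat, the class of the innermost scalar ('builder' is hypothetical):
+//
+//     Id vec4Ty = builder.makeVectorType(builder.makeFloatType(32), 4);
+//     Id ptrTy  = builder.makePointer(StorageClassFunction, vec4Ty);
+//     // builder.getMostBasicTypeClass(ptrTy) == OpTypeFloat
+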
+int Builder::getNumTypeConstituents(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ switch (instr->getOpCode())
+ {
+ case OpTypeBool:
+ case OpTypeInt:
+ case OpTypeFloat:
+ case OpTypePointer:
+ return 1;
+ case OpTypeVector:
+ case OpTypeMatrix:
+ return instr->getImmediateOperand(1);
+ case OpTypeArray:
+ {
+ Id lengthId = instr->getIdOperand(1);
+ return module.getInstruction(lengthId)->getImmediateOperand(0);
+ }
+ case OpTypeStruct:
+ return instr->getNumOperands();
+ case OpTypeCooperativeMatrixNV:
+ // has only one constituent when used with OpCompositeConstruct.
+ return 1;
+ default:
+ assert(0);
+ return 1;
+ }
+}
+
+// Return the lowest-level scalar type that a homogeneous composite is made out of.
+// Typically, this is just to find out if something is made out of ints or floats.
+// However, it can also return a structure if, say, the composite is an array of structures.
+Id Builder::getScalarTypeId(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVoid:
+ case OpTypeBool:
+ case OpTypeInt:
+ case OpTypeFloat:
+ case OpTypeStruct:
+ return instr->getResultId();
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ case OpTypePointer:
+ return getScalarTypeId(getContainedTypeId(typeId));
+ default:
+ assert(0);
+ return NoResult;
+ }
+}
+
+// Return the type of 'member' of a composite.
+Id Builder::getContainedTypeId(Id typeId, int member) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ case OpTypeCooperativeMatrixNV:
+ return instr->getIdOperand(0);
+ case OpTypePointer:
+ return instr->getIdOperand(1);
+ case OpTypeStruct:
+ return instr->getIdOperand(member);
+ default:
+ assert(0);
+ return NoResult;
+ }
+}
+
+// Return the immediately contained type of a given composite type.
+Id Builder::getContainedTypeId(Id typeId) const
+{
+ return getContainedTypeId(typeId, 0);
+}
+
+// Returns true if 'typeId' is or contains a scalar type declared with 'typeOp'
+// of width 'width'. The 'width' is only consumed for int and float types.
+// Returns false otherwise.
+bool Builder::containsType(Id typeId, spv::Op typeOp, unsigned int width) const
+{
+ const Instruction& instr = *module.getInstruction(typeId);
+
+ Op typeClass = instr.getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeInt:
+ case OpTypeFloat:
+ return typeClass == typeOp && instr.getImmediateOperand(0) == width;
+ case OpTypeStruct:
+ for (int m = 0; m < instr.getNumOperands(); ++m) {
+ if (containsType(instr.getIdOperand(m), typeOp, width))
+ return true;
+ }
+ return false;
+ case OpTypePointer:
+ return false;
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ return containsType(getContainedTypeId(typeId), typeOp, width);
+ default:
+ return typeClass == typeOp;
+ }
+}
+
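+// Illustrative usage sketch (exposition only, not upstream code): containsType()
+// answers questions such as "does this type contain a 16-bit float anywhere?";
+// 'structTy' is a hypothetical struct type id:
+//
+//     bool has16BitFloat = builder.containsType(structTy, OpTypeFloat, 16);
+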
+// Return true if the type is a pointer to PhysicalStorageBufferEXT or an
+// array of such pointers. These require restrict/aliased decorations.
+bool Builder::containsPhysicalStorageBufferOrArray(Id typeId) const
+{
+ const Instruction& instr = *module.getInstruction(typeId);
+
+ Op typeClass = instr.getOpCode();
+ switch (typeClass)
+ {
+ case OpTypePointer:
+ return getTypeStorageClass(typeId) == StorageClassPhysicalStorageBufferEXT;
+ case OpTypeArray:
+ return containsPhysicalStorageBufferOrArray(getContainedTypeId(typeId));
+ default:
+ return false;
+ }
+}
+
+// See if a scalar constant of this type has already been created, so it
+// can be reused rather than duplicated. (Required by the specification).
+Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value)
+{
+ Instruction* constant;
+ for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+ constant = groupedConstants[typeClass][i];
+ if (constant->getOpCode() == opcode &&
+ constant->getTypeId() == typeId &&
+ constant->getImmediateOperand(0) == value)
+ return constant->getResultId();
+ }
+
+ return 0;
+}
+
+// Version of findScalarConstant (see above) for scalars that take two operands (e.g. a 'double' or 'int64').
+Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2)
+{
+ Instruction* constant;
+ for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+ constant = groupedConstants[typeClass][i];
+ if (constant->getOpCode() == opcode &&
+ constant->getTypeId() == typeId &&
+ constant->getImmediateOperand(0) == v1 &&
+ constant->getImmediateOperand(1) == v2)
+ return constant->getResultId();
+ }
+
+ return 0;
+}
+
+// Return true if consuming 'opcode' means consuming a constant.
+// "constant" here means after final transform to executable code,
+// the value consumed will be a constant, so includes specialization.
+bool Builder::isConstantOpCode(Op opcode) const
+{
+ switch (opcode) {
+ case OpUndef:
+ case OpConstantTrue:
+ case OpConstantFalse:
+ case OpConstant:
+ case OpConstantComposite:
+ case OpConstantSampler:
+ case OpConstantNull:
+ case OpSpecConstantTrue:
+ case OpSpecConstantFalse:
+ case OpSpecConstant:
+ case OpSpecConstantComposite:
+ case OpSpecConstantOp:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Return true if consuming 'opcode' means consuming a specialization constant.
+bool Builder::isSpecConstantOpCode(Op opcode) const
+{
+ switch (opcode) {
+ case OpSpecConstantTrue:
+ case OpSpecConstantFalse:
+ case OpSpecConstant:
+ case OpSpecConstantComposite:
+ case OpSpecConstantOp:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Id Builder::makeBoolConstant(bool b, bool specConstant)
+{
+ Id typeId = makeBoolType();
+ Instruction* constant;
+ Op opcode = specConstant ? (b ? OpSpecConstantTrue : OpSpecConstantFalse) : (b ? OpConstantTrue : OpConstantFalse);
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = 0;
+ for (int i = 0; i < (int)groupedConstants[OpTypeBool].size(); ++i) {
+ constant = groupedConstants[OpTypeBool][i];
+ if (constant->getTypeId() == typeId && constant->getOpCode() == opcode)
+ existing = constant->getResultId();
+ }
+
+ if (existing)
+ return existing;
+ }
+
+ // Make it
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeBool].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeIntConstant(Id typeId, unsigned value, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeInt, opcode, typeId, value);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(value);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeInt].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeInt64Constant(Id typeId, unsigned long long value, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+
+ unsigned op1 = value & 0xFFFFFFFF;
+ unsigned op2 = value >> 32;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeInt, opcode, typeId, op1, op2);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(op1);
+ c->addImmediateOperand(op2);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeInt].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
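+// Illustrative note (exposition only, not upstream code): 64-bit scalars are
+// split into two 32-bit literal words, low word first, as SPIR-V requires
+// ('builder' is a hypothetical spv::Builder):
+//
+//     Id u64 = builder.makeUintType(64);
+//     builder.makeInt64Constant(u64, 0x0000000100000002ull, false);
+//     // emits OpConstant %u64 with literals 0x00000002 (low) then 0x00000001 (high)
+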
+Id Builder::makeFloatConstant(float f, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+ Id typeId = makeFloatType(32);
+ union { float fl; unsigned int ui; } u;
+ u.fl = f;
+ unsigned value = u.ui;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(value);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeFloat].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeDoubleConstant(double d, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+ Id typeId = makeFloatType(64);
+ union { double db; unsigned long long ull; } u;
+ u.db = d;
+ unsigned long long value = u.ull;
+ unsigned op1 = value & 0xFFFFFFFF;
+ unsigned op2 = value >> 32;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, op1, op2);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(op1);
+ c->addImmediateOperand(op2);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeFloat].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeFloat16Constant(float f16, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+ Id typeId = makeFloatType(16);
+
+ spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(f16);
+ spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>> f16Val(0);
+ fVal.castTo(f16Val, spvutils::kRoundToZero);
+
+ unsigned value = f16Val.value().getAsFloat().get_value();
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (!specConstant) {
+ Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(value);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeFloat].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeFpConstant(Id type, double d, bool specConstant)
+{
+ assert(isFloatType(type));
+
+ switch (getScalarTypeWidth(type)) {
+ case 16:
+ return makeFloat16Constant((float)d, specConstant);
+ case 32:
+ return makeFloatConstant((float)d, specConstant);
+ case 64:
+ return makeDoubleConstant(d, specConstant);
+ default:
+ break;
+ }
+
+ assert(false);
+ return NoResult;
+}
+
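+// Illustrative usage sketch (exposition only, not upstream code): makeFpConstant()
+// dispatches on the width of the requested float type, so one call site can serve
+// fp16, fp32, and fp64:
+//
+//     Id halfOne = builder.makeFpConstant(builder.makeFloatType(16), 1.0, false);
+//     // routes to makeFloat16Constant(1.0f, false)
+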
+Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
+{
+ Instruction* constant = 0;
+ bool found = false;
+ for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+ constant = groupedConstants[typeClass][i];
+
+ if (constant->getTypeId() != typeId)
+ continue;
+
+ // same contents?
+ bool mismatch = false;
+ for (int op = 0; op < constant->getNumOperands(); ++op) {
+ if (constant->getIdOperand(op) != comps[op]) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch) {
+ found = true;
+ break;
+ }
+ }
+
+ return found ? constant->getResultId() : NoResult;
+}
+
+Id Builder::findStructConstant(Id typeId, const std::vector<Id>& comps)
+{
+ Instruction* constant = 0;
+ bool found = false;
+ for (int i = 0; i < (int)groupedStructConstants[typeId].size(); ++i) {
+ constant = groupedStructConstants[typeId][i];
+
+ // same contents?
+ bool mismatch = false;
+ for (int op = 0; op < constant->getNumOperands(); ++op) {
+ if (constant->getIdOperand(op) != comps[op]) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch) {
+ found = true;
+ break;
+ }
+ }
+
+ return found ? constant->getResultId() : NoResult;
+}
+
+// Comments in header
+Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstantComposite : OpConstantComposite;
+ assert(typeId);
+ Op typeClass = getTypeClass(typeId);
+
+ switch (typeClass) {
+ case OpTypeVector:
+ case OpTypeArray:
+ case OpTypeMatrix:
+ case OpTypeCooperativeMatrixNV:
+ if (! specConstant) {
+ Id existing = findCompositeConstant(typeClass, typeId, members);
+ if (existing)
+ return existing;
+ }
+ break;
+ case OpTypeStruct:
+ if (! specConstant) {
+ Id existing = findStructConstant(typeId, members);
+ if (existing)
+ return existing;
+ }
+ break;
+ default:
+ assert(0);
+ return makeFloatConstant(0.0);
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ for (int op = 0; op < (int)members.size(); ++op)
+ c->addIdOperand(members[op]);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ if (typeClass == OpTypeStruct)
+ groupedStructConstants[typeId].push_back(c);
+ else
+ groupedConstants[typeClass].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Instruction* Builder::addEntryPoint(ExecutionModel model, Function* function, const char* name)
+{
+ Instruction* entryPoint = new Instruction(OpEntryPoint);
+ entryPoint->addImmediateOperand(model);
+ entryPoint->addIdOperand(function->getId());
+ entryPoint->addStringOperand(name);
+
+ entryPoints.push_back(std::unique_ptr<Instruction>(entryPoint));
+
+ return entryPoint;
+}
+
+// Currently relies on the fact that all 'value' arguments of interest are small non-negative values.
+void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, int value1, int value2, int value3)
+{
+ Instruction* instr = new Instruction(OpExecutionMode);
+ instr->addIdOperand(entryPoint->getId());
+ instr->addImmediateOperand(mode);
+ if (value1 >= 0)
+ instr->addImmediateOperand(value1);
+ if (value2 >= 0)
+ instr->addImmediateOperand(value2);
+ if (value3 >= 0)
+ instr->addImmediateOperand(value3);
+
+ executionModes.push_back(std::unique_ptr<Instruction>(instr));
+}
+
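+// Illustrative usage sketch (exposition only, not upstream code): trailing values
+// left at -1 are simply omitted, so a mode can take zero to three literals;
+// 'entryFn' is a hypothetical Function*:
+//
+//     builder.addExecutionMode(entryFn, ExecutionModeLocalSize, 8, 8, 1);
+//     builder.addExecutionMode(entryFn, ExecutionModeOriginUpperLeft, -1, -1, -1);  // no literals emitted
+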
+void Builder::addName(Id id, const char* string)
+{
+ Instruction* name = new Instruction(OpName);
+ name->addIdOperand(id);
+ name->addStringOperand(string);
+
+ names.push_back(std::unique_ptr<Instruction>(name));
+}
+
+void Builder::addMemberName(Id id, int memberNumber, const char* string)
+{
+ Instruction* name = new Instruction(OpMemberName);
+ name->addIdOperand(id);
+ name->addImmediateOperand(memberNumber);
+ name->addStringOperand(string);
+
+ names.push_back(std::unique_ptr<Instruction>(name));
+}
+
+void Builder::addDecoration(Id id, Decoration decoration, int num)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpDecorate);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(decoration);
+ if (num >= 0)
+ dec->addImmediateOperand(num);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addDecoration(Id id, Decoration decoration, const char* s)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpDecorateStringGOOGLE);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(decoration);
+ dec->addStringOperand(s);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addDecorationId(Id id, Decoration decoration, Id idDecoration)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpDecorateId);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(decoration);
+ dec->addIdOperand(idDecoration);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, int num)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpMemberDecorate);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(member);
+ dec->addImmediateOperand(decoration);
+ if (num >= 0)
+ dec->addImmediateOperand(num);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const char *s)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpMemberDecorateStringGOOGLE);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(member);
+ dec->addImmediateOperand(decoration);
+ dec->addStringOperand(s);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+// Comments in header
+Function* Builder::makeEntryPoint(const char* entryPoint)
+{
+ assert(! entryPointFunction);
+
+ Block* entry;
+ std::vector<Id> params;
+ std::vector<std::vector<Decoration>> decorations;
+
+ entryPointFunction = makeFunctionEntry(NoPrecision, makeVoidType(), entryPoint, params, decorations, &entry);
+
+ return entryPointFunction;
+}
+
+// Comments in header
+Function* Builder::makeFunctionEntry(Decoration precision, Id returnType, const char* name,
+ const std::vector<Id>& paramTypes, const std::vector<std::vector<Decoration>>& decorations, Block **entry)
+{
+ // Make the function and initial instructions in it
+ Id typeId = makeFunctionType(returnType, paramTypes);
+ Id firstParamId = paramTypes.size() == 0 ? 0 : getUniqueIds((int)paramTypes.size());
+ Function* function = new Function(getUniqueId(), returnType, typeId, firstParamId, module);
+
+ // Set up the precisions
+ setPrecision(function->getId(), precision);
+ for (unsigned p = 0; p < (unsigned)decorations.size(); ++p) {
+ for (int d = 0; d < (int)decorations[p].size(); ++d)
+ addDecoration(firstParamId + p, decorations[p][d]);
+ }
+
+ // CFG
+ if (entry) {
+ *entry = new Block(getUniqueId(), *function);
+ function->addBlock(*entry);
+ setBuildPoint(*entry);
+ }
+
+ if (name)
+ addName(function->getId(), name);
+
+ functions.push_back(std::unique_ptr<Function>(function));
+
+ return function;
+}
+
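+// Illustrative usage sketch (exposition only, not upstream code): after
+// makeEntryPoint() the build point sits in the fresh entry block, ready for code
+// ('builder' is a hypothetical spv::Builder):
+//
+//     Function* mainFn = builder.makeEntryPoint("main");  // void main(), entry block selected
+//     builder.addEntryPoint(ExecutionModelFragment, mainFn, "main");
+//     // ... emit the body at the current build point ...
+//     builder.leaveFunction();  // appends OpReturn (or OpReturnValue of an undef) if needed
+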
+// Comments in header
+void Builder::makeReturn(bool implicit, Id retVal)
+{
+ if (retVal) {
+ Instruction* inst = new Instruction(NoResult, NoType, OpReturnValue);
+ inst->addIdOperand(retVal);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+ } else
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(NoResult, NoType, OpReturn)));
+
+ if (! implicit)
+ createAndSetNoPredecessorBlock("post-return");
+}
+
+// Comments in header
+void Builder::leaveFunction()
+{
+ Block* block = buildPoint;
+ Function& function = buildPoint->getParent();
+ assert(block);
+
+    // If our function did not end with a return, add one now: OpReturn for void,
+    // or OpReturnValue of an undef for non-void functions.
+ if (! block->isTerminated()) {
+ if (function.getReturnType() == makeVoidType())
+ makeReturn(true);
+ else {
+ makeReturn(true, createUndefined(function.getReturnType()));
+ }
+ }
+}
+
+// Comments in header
+void Builder::makeDiscard()
+{
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(OpKill)));
+ createAndSetNoPredecessorBlock("post-discard");
+}
+
+// Comments in header
+Id Builder::createVariable(StorageClass storageClass, Id type, const char* name)
+{
+ Id pointerType = makePointer(storageClass, type);
+ Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable);
+ inst->addImmediateOperand(storageClass);
+
+ switch (storageClass) {
+ case StorageClassFunction:
+ // Validation rules require the declaration in the entry block
+ buildPoint->getParent().addLocalVariable(std::unique_ptr<Instruction>(inst));
+ break;
+
+ default:
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
+ module.mapInstruction(inst);
+ break;
+ }
+
+ if (name)
+ addName(inst->getResultId(), name);
+
+ return inst->getResultId();
+}
+
+// Comments in header
+Id Builder::createUndefined(Id type)
+{
+ Instruction* inst = new Instruction(getUniqueId(), type, OpUndef);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+ return inst->getResultId();
+}
+
+// av/vis/nonprivate are unnecessary and illegal for some storage classes.
+spv::MemoryAccessMask Builder::sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) const
+{
+ switch (sc) {
+ case spv::StorageClassUniform:
+ case spv::StorageClassWorkgroup:
+ case spv::StorageClassStorageBuffer:
+ case spv::StorageClassPhysicalStorageBufferEXT:
+ break;
+ default:
+ memoryAccess = spv::MemoryAccessMask(memoryAccess &
+ ~(spv::MemoryAccessMakePointerAvailableKHRMask |
+ spv::MemoryAccessMakePointerVisibleKHRMask |
+ spv::MemoryAccessNonPrivatePointerKHRMask));
+ break;
+ }
+ return memoryAccess;
+}
+
+// Comments in header
+void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ Instruction* store = new Instruction(OpStore);
+ store->addIdOperand(lValue);
+ store->addIdOperand(rValue);
+
+ memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
+
+ if (memoryAccess != MemoryAccessMaskNone) {
+ store->addImmediateOperand(memoryAccess);
+ if (memoryAccess & spv::MemoryAccessAlignedMask) {
+ store->addImmediateOperand(alignment);
+ }
+ if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) {
+ store->addIdOperand(makeUintConstant(scope));
+ }
+ }
+
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(store));
+}
+
+// Comments in header
+Id Builder::createLoad(Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
+ load->addIdOperand(lValue);
+
+ memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
+
+ if (memoryAccess != MemoryAccessMaskNone) {
+ load->addImmediateOperand(memoryAccess);
+ if (memoryAccess & spv::MemoryAccessAlignedMask) {
+ load->addImmediateOperand(alignment);
+ }
+ if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) {
+ load->addIdOperand(makeUintConstant(scope));
+ }
+ }
+
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(load));
+
+ return load->getResultId();
+}
+
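+// Illustrative usage sketch (exposition only, not upstream code): a local
+// variable round trip; with a plain mask the scope and alignment arguments are
+// inert ('builder' is hypothetical):
+//
+//     Id f32 = builder.makeFloatType(32);
+//     Id tmp = builder.createVariable(StorageClassFunction, f32, "tmp");
+//     builder.createStore(builder.makeFloatConstant(1.0f), tmp, MemoryAccessMaskNone, ScopeMax, 0);
+//     Id val = builder.createLoad(tmp, MemoryAccessMaskNone, ScopeMax, 0);
+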
+// Comments in header
+Id Builder::createAccessChain(StorageClass storageClass, Id base, const std::vector<Id>& offsets)
+{
+ // Figure out the final resulting type.
+ spv::Id typeId = getTypeId(base);
+ assert(isPointerType(typeId) && offsets.size() > 0);
+ typeId = getContainedTypeId(typeId);
+ for (int i = 0; i < (int)offsets.size(); ++i) {
+ if (isStructType(typeId)) {
+ assert(isConstantScalar(offsets[i]));
+ typeId = getContainedTypeId(typeId, getConstantScalar(offsets[i]));
+ } else
+ typeId = getContainedTypeId(typeId, offsets[i]);
+ }
+ typeId = makePointer(storageClass, typeId);
+
+ // Make the instruction
+ Instruction* chain = new Instruction(getUniqueId(), typeId, OpAccessChain);
+ chain->addIdOperand(base);
+ for (int i = 0; i < (int)offsets.size(); ++i)
+ chain->addIdOperand(offsets[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(chain));
+
+ return chain->getResultId();
+}
+
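+// Illustrative usage sketch (exposition only, not upstream code): struct members
+// must be indexed with constant scalars, so a chain to member 1 of a struct
+// behind a hypothetical pointer 'basePtr' looks like:
+//
+//     std::vector<Id> offsets(1, builder.makeUintConstant(1));
+//     Id memberPtr = builder.createAccessChain(StorageClassFunction, basePtr, offsets);
+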
+Id Builder::createArrayLength(Id base, unsigned int member)
+{
+ spv::Id intType = makeUintType(32);
+ Instruction* length = new Instruction(getUniqueId(), intType, OpArrayLength);
+ length->addIdOperand(base);
+ length->addImmediateOperand(member);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
+
+ return length->getResultId();
+}
+
+Id Builder::createCooperativeMatrixLength(Id type)
+{
+ spv::Id intType = makeUintType(32);
+
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(OpCooperativeMatrixLengthNV, intType, std::vector<Id>(1, type), std::vector<Id>());
+ }
+
+ Instruction* length = new Instruction(getUniqueId(), intType, OpCooperativeMatrixLengthNV);
+ length->addIdOperand(type);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
+
+ return length->getResultId();
+}
+
+Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), std::vector<Id>(1, index));
+ }
+ Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+ extract->addIdOperand(composite);
+ extract->addImmediateOperand(index);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+ return extract->getResultId();
+}
+
+Id Builder::createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), indexes);
+ }
+ Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+ extract->addIdOperand(composite);
+ for (int i = 0; i < (int)indexes.size(); ++i)
+ extract->addImmediateOperand(indexes[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+ return extract->getResultId();
+}
+
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, unsigned index)
+{
+ Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+ insert->addIdOperand(object);
+ insert->addIdOperand(composite);
+ insert->addImmediateOperand(index);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+ return insert->getResultId();
+}
+
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes)
+{
+ Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+ insert->addIdOperand(object);
+ insert->addIdOperand(composite);
+ for (int i = 0; i < (int)indexes.size(); ++i)
+ insert->addImmediateOperand(indexes[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+ return insert->getResultId();
+}
+
+Id Builder::createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex)
+{
+ Instruction* extract = new Instruction(getUniqueId(), typeId, OpVectorExtractDynamic);
+ extract->addIdOperand(vector);
+ extract->addIdOperand(componentIndex);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+ return extract->getResultId();
+}
+
+Id Builder::createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex)
+{
+ Instruction* insert = new Instruction(getUniqueId(), typeId, OpVectorInsertDynamic);
+ insert->addIdOperand(vector);
+ insert->addIdOperand(component);
+ insert->addIdOperand(componentIndex);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+ return insert->getResultId();
+}
+
+// An opcode that has no operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode)
+{
+ Instruction* op = new Instruction(opCode);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one id operand, no result id, and no type
+void Builder::createNoResultOp(Op opCode, Id operand)
+{
+ Instruction* op = new Instruction(opCode);
+ op->addIdOperand(operand);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one or more operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
+{
+ Instruction* op = new Instruction(opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+ op->addIdOperand(*it);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has multiple operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
+{
+ Instruction* op = new Instruction(opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+ if (it->isId)
+ op->addIdOperand(it->word);
+ else
+ op->addImmediateOperand(it->word);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics)
+{
+ Instruction* op = new Instruction(OpControlBarrier);
+ op->addIdOperand(makeUintConstant(execution));
+ op->addIdOperand(makeUintConstant(memory));
+ op->addIdOperand(makeUintConstant(semantics));
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+void Builder::createMemoryBarrier(unsigned executionScope, unsigned memorySemantics)
+{
+ Instruction* op = new Instruction(OpMemoryBarrier);
+ op->addIdOperand(makeUintConstant(executionScope));
+ op->addIdOperand(makeUintConstant(memorySemantics));
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one operand, a result id, and a type
+Id Builder::createUnaryOp(Op opCode, Id typeId, Id operand)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(opCode, typeId, std::vector<Id>(1, operand), std::vector<Id>());
+ }
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ op->addIdOperand(operand);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createBinOp(Op opCode, Id typeId, Id left, Id right)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ std::vector<Id> operands(2);
+ operands[0] = left; operands[1] = right;
+ return createSpecConstantOp(opCode, typeId, operands, std::vector<Id>());
+ }
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ op->addIdOperand(left);
+ op->addIdOperand(right);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createTriOp(Op opCode, Id typeId, Id op1, Id op2, Id op3)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ std::vector<Id> operands(3);
+ operands[0] = op1;
+ operands[1] = op2;
+ operands[2] = op3;
+ return createSpecConstantOp(
+ opCode, typeId, operands, std::vector<Id>());
+ }
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ op->addIdOperand(op1);
+ op->addIdOperand(op2);
+ op->addIdOperand(op3);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createOp(Op opCode, Id typeId, const std::vector<Id>& operands)
+{
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it)
+ op->addIdOperand(*it);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createOp(Op opCode, Id typeId, const std::vector<IdImmediate>& operands)
+{
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+ if (it->isId)
+ op->addIdOperand(it->word);
+ else
+ op->addImmediateOperand(it->word);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector<Id>& operands, const std::vector<unsigned>& literals)
+{
+ Instruction* op = new Instruction(getUniqueId(), typeId, OpSpecConstantOp);
+ op->addImmediateOperand((unsigned) opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it)
+ op->addIdOperand(*it);
+ for (auto it = literals.cbegin(); it != literals.cend(); ++it)
+ op->addImmediateOperand(*it);
+ module.mapInstruction(op);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createFunctionCall(spv::Function* function, const std::vector<spv::Id>& args)
+{
+ Instruction* op = new Instruction(getUniqueId(), function->getReturnType(), OpFunctionCall);
+ op->addIdOperand(function->getId());
+ for (int a = 0; a < (int)args.size(); ++a)
+ op->addIdOperand(args[a]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+// Comments in header
+Id Builder::createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels)
+{
+ if (channels.size() == 1)
+ return setPrecision(createCompositeExtract(source, typeId, channels.front()), precision);
+
+ if (generatingOpCodeForSpecConst) {
+ std::vector<Id> operands(2);
+ operands[0] = operands[1] = source;
+ return setPrecision(createSpecConstantOp(OpVectorShuffle, typeId, operands, channels), precision);
+ }
+ Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle);
+ assert(isVector(source));
+ swizzle->addIdOperand(source);
+ swizzle->addIdOperand(source);
+ for (int i = 0; i < (int)channels.size(); ++i)
+ swizzle->addImmediateOperand(channels[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(swizzle));
+
+ return setPrecision(swizzle->getResultId(), precision);
+}
+
+// Comments in header
+Id Builder::createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels)
+{
+ if (channels.size() == 1 && getNumComponents(source) == 1)
+ return createCompositeInsert(source, target, typeId, channels.front());
+
+ Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle);
+
+ assert(isVector(target));
+ swizzle->addIdOperand(target);
+
+ assert(getNumComponents(source) == (int)channels.size());
+ assert(isVector(source));
+ swizzle->addIdOperand(source);
+
+ // Set up an identity shuffle from the base value to the result value
+ unsigned int components[4];
+ int numTargetComponents = getNumComponents(target);
+ for (int i = 0; i < numTargetComponents; ++i)
+ components[i] = i;
+
+ // Punch in the l-value swizzle
+ for (int i = 0; i < (int)channels.size(); ++i)
+ components[channels[i]] = numTargetComponents + i;
+
+    // Finish the instruction with these component selectors
+ for (int i = 0; i < numTargetComponents; ++i)
+ swizzle->addImmediateOperand(components[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(swizzle));
+
+ return swizzle->getResultId();
+}
+
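+// Illustrative note (exposition only, not upstream code): for a vec4 target and
+// the l-value swizzle "target.zw = source", channels = {2,3}, so the shuffle
+// selectors become {0, 1, 4, 5}: components 0-1 keep the old target values,
+// while 4-5 pull from the appended source operand.
+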
+// Comments in header
+void Builder::promoteScalar(Decoration precision, Id& left, Id& right)
+{
+ int direction = getNumComponents(right) - getNumComponents(left);
+
+ if (direction > 0)
+ left = smearScalar(precision, left, makeVectorType(getTypeId(left), getNumComponents(right)));
+ else if (direction < 0)
+ right = smearScalar(precision, right, makeVectorType(getTypeId(right), getNumComponents(left)));
+
+ return;
+}
+
+// Comments in header
+Id Builder::smearScalar(Decoration precision, Id scalar, Id vectorType)
+{
+ assert(getNumComponents(scalar) == 1);
+ assert(getTypeId(scalar) == getScalarTypeId(vectorType));
+
+ int numComponents = getNumTypeComponents(vectorType);
+ if (numComponents == 1)
+ return scalar;
+
+ Instruction* smear = nullptr;
+ if (generatingOpCodeForSpecConst) {
+ auto members = std::vector<spv::Id>(numComponents, scalar);
+        // Sometimes, even in spec-constant-op mode, the temporary vector created by
+ // promoting a scalar might not be a spec constant. This should depend on
+ // the scalar.
+ // e.g.:
+ // const vec2 spec_const_result = a_spec_const_vec2 + a_front_end_const_scalar;
+ // In such cases, the temporary vector created from a_front_end_const_scalar
+ // is not a spec constant vector, even though the binary operation node is marked
+ // as 'specConstant' and we are in spec-constant-op mode.
+ auto result_id = makeCompositeConstant(vectorType, members, isSpecConstant(scalar));
+ smear = module.getInstruction(result_id);
+ } else {
+ smear = new Instruction(getUniqueId(), vectorType, OpCompositeConstruct);
+ for (int c = 0; c < numComponents; ++c)
+ smear->addIdOperand(scalar);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(smear));
+ }
+
+ return setPrecision(smear->getResultId(), precision);
+}
+
+// Comments in header
+Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args)
+{
+ Instruction* inst = new Instruction(getUniqueId(), resultType, OpExtInst);
+ inst->addIdOperand(builtins);
+ inst->addImmediateOperand(entryPoint);
+ for (int arg = 0; arg < (int)args.size(); ++arg)
+ inst->addIdOperand(args[arg]);
+
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+ return inst->getResultId();
+}
+
+// Accept all parameters needed to create a texture instruction.
+// Create the correct instruction based on the inputs, and make the call.
+Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
+ bool noImplicitLod, const TextureParameters& parameters)
+{
+ static const int maxTextureArgs = 10;
+ Id texArgs[maxTextureArgs] = {};
+
+ //
+ // Set up the fixed arguments
+ //
+ int numArgs = 0;
+ bool explicitLod = false;
+ texArgs[numArgs++] = parameters.sampler;
+ texArgs[numArgs++] = parameters.coords;
+ if (parameters.Dref != NoResult)
+ texArgs[numArgs++] = parameters.Dref;
+ if (parameters.component != NoResult)
+ texArgs[numArgs++] = parameters.component;
+
+#ifdef NV_EXTENSIONS
+ if (parameters.granularity != NoResult)
+ texArgs[numArgs++] = parameters.granularity;
+ if (parameters.coarse != NoResult)
+ texArgs[numArgs++] = parameters.coarse;
+#endif
+
+ //
+ // Set up the optional arguments
+ //
+ int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments
+ ++numArgs; // speculatively make room for the mask operand
+ ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand
+ if (parameters.bias) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask);
+ texArgs[numArgs++] = parameters.bias;
+ }
+ if (parameters.lod) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsLodMask);
+ texArgs[numArgs++] = parameters.lod;
+ explicitLod = true;
+ } else if (parameters.gradX) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsGradMask);
+ texArgs[numArgs++] = parameters.gradX;
+ texArgs[numArgs++] = parameters.gradY;
+ explicitLod = true;
+ } else if (noImplicitLod && ! fetch && ! gather) {
+        // have to explicitly use an lod of 0 if implicit lods are not allowed, and
+        // we would otherwise be about to issue an implicit-lod instruction
+ mask = (ImageOperandsMask)(mask | ImageOperandsLodMask);
+ texArgs[numArgs++] = makeFloatConstant(0.0);
+ explicitLod = true;
+ }
+ if (parameters.offset) {
+ if (isConstant(parameters.offset))
+ mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetMask);
+ else {
+ addCapability(CapabilityImageGatherExtended);
+ mask = (ImageOperandsMask)(mask | ImageOperandsOffsetMask);
+ }
+ texArgs[numArgs++] = parameters.offset;
+ }
+ if (parameters.offsets) {
+ addCapability(CapabilityImageGatherExtended);
+ mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
+ texArgs[numArgs++] = parameters.offsets;
+ }
+ if (parameters.sample) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
+ texArgs[numArgs++] = parameters.sample;
+ }
+ if (parameters.lodClamp) {
+ // capability if this bit is used
+ addCapability(CapabilityMinLod);
+
+ mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask);
+ texArgs[numArgs++] = parameters.lodClamp;
+ }
+ if (parameters.nonprivate) {
+ mask = mask | ImageOperandsNonPrivateTexelKHRMask;
+ }
+ if (parameters.volatil) {
+ mask = mask | ImageOperandsVolatileTexelKHRMask;
+ }
+ if (mask == ImageOperandsMaskNone)
+ --numArgs; // undo speculative reservation for the mask argument
+ else
+ texArgs[optArgNum] = mask;
+
+ //
+ // Set up the instruction
+ //
+ Op opCode = OpNop; // All paths below need to set this
+ if (fetch) {
+ if (sparse)
+ opCode = OpImageSparseFetch;
+ else
+ opCode = OpImageFetch;
+#ifdef NV_EXTENSIONS
+ } else if (parameters.granularity && parameters.coarse) {
+ opCode = OpImageSampleFootprintNV;
+#endif
+ } else if (gather) {
+ if (parameters.Dref)
+ if (sparse)
+ opCode = OpImageSparseDrefGather;
+ else
+ opCode = OpImageDrefGather;
+ else
+ if (sparse)
+ opCode = OpImageSparseGather;
+ else
+ opCode = OpImageGather;
+ } else if (explicitLod) {
+ if (parameters.Dref) {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjDrefExplicitLod;
+ else
+ opCode = OpImageSampleProjDrefExplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleDrefExplicitLod;
+ else
+ opCode = OpImageSampleDrefExplicitLod;
+ } else {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjExplicitLod;
+ else
+ opCode = OpImageSampleProjExplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleExplicitLod;
+ else
+ opCode = OpImageSampleExplicitLod;
+ }
+ } else {
+ if (parameters.Dref) {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjDrefImplicitLod;
+ else
+ opCode = OpImageSampleProjDrefImplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleDrefImplicitLod;
+ else
+ opCode = OpImageSampleDrefImplicitLod;
+ } else {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjImplicitLod;
+ else
+ opCode = OpImageSampleProjImplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleImplicitLod;
+ else
+ opCode = OpImageSampleImplicitLod;
+ }
+ }
+
+ // See if the result type is expecting a smeared result.
+ // This happens when a legacy shadow*() call is made, which
+ // gets a vec4 back instead of a float.
+ Id smearedType = resultType;
+ if (! isScalarType(resultType)) {
+ switch (opCode) {
+ case OpImageSampleDrefImplicitLod:
+ case OpImageSampleDrefExplicitLod:
+ case OpImageSampleProjDrefImplicitLod:
+ case OpImageSampleProjDrefExplicitLod:
+ resultType = getScalarTypeId(resultType);
+ break;
+ default:
+ break;
+ }
+ }
+
+ Id typeId0 = 0;
+ Id typeId1 = 0;
+
+ if (sparse) {
+ typeId0 = resultType;
+ typeId1 = getDerefTypeId(parameters.texelOut);
+ resultType = makeStructResultType(typeId0, typeId1);
+ }
+
+ // Build the SPIR-V instruction
+ Instruction* textureInst = new Instruction(getUniqueId(), resultType, opCode);
+ for (int op = 0; op < optArgNum; ++op)
+ textureInst->addIdOperand(texArgs[op]);
+ if (optArgNum < numArgs)
+ textureInst->addImmediateOperand(texArgs[optArgNum]);
+ for (int op = optArgNum + 1; op < numArgs; ++op)
+ textureInst->addIdOperand(texArgs[op]);
+ setPrecision(textureInst->getResultId(), precision);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(textureInst));
+
+ Id resultId = textureInst->getResultId();
+
+ if (sparse) {
+ // set capability
+ addCapability(CapabilitySparseResidency);
+
+ // Decode the return type that was a special structure
+ createStore(createCompositeExtract(resultId, typeId1, 1), parameters.texelOut);
+ resultId = createCompositeExtract(resultId, typeId0, 0);
+ setPrecision(resultId, precision);
+ } else {
+ // When a smear is needed, do it, as per what was computed
+ // above when resultType was changed to a scalar type.
+ if (resultType != smearedType)
+ resultId = smearScalar(precision, resultId, smearedType);
+ }
+
+ return resultId;
+}
+
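+// Illustrative note (exposition only, not upstream code): the emitted image
+// instruction is laid out as the fixed id operands, then the optional ImageOperands
+// mask, then the mask-ordered extra ids; e.g. a biased 2D sample disassembles as:
+//
+//     %r = OpImageSampleImplicitLod %v4float %sampledImage %coords Bias %bias
+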
+// Comments in header
+Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters, bool isUnsignedResult)
+{
+ // Figure out the result type
+ Id resultType = 0;
+ switch (opCode) {
+ case OpImageQuerySize:
+ case OpImageQuerySizeLod:
+ {
+ int numComponents = 0;
+ switch (getTypeDimensionality(getImageType(parameters.sampler))) {
+ case Dim1D:
+ case DimBuffer:
+ numComponents = 1;
+ break;
+ case Dim2D:
+ case DimCube:
+ case DimRect:
+ case DimSubpassData:
+ numComponents = 2;
+ break;
+ case Dim3D:
+ numComponents = 3;
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+ if (isArrayedImageType(getImageType(parameters.sampler)))
+ ++numComponents;
+
+ Id intType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
+ if (numComponents == 1)
+ resultType = intType;
+ else
+ resultType = makeVectorType(intType, numComponents);
+
+ break;
+ }
+ case OpImageQueryLod:
+#ifdef AMD_EXTENSIONS
+ resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
+#else
+ resultType = makeVectorType(makeFloatType(32), 2);
+#endif
+ break;
+ case OpImageQueryLevels:
+ case OpImageQuerySamples:
+ resultType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ Instruction* query = new Instruction(getUniqueId(), resultType, opCode);
+ query->addIdOperand(parameters.sampler);
+ if (parameters.coords)
+ query->addIdOperand(parameters.coords);
+ if (parameters.lod)
+ query->addIdOperand(parameters.lod);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
+
+ return query->getResultId();
+}
+
+// External comments in header.
+// Operates recursively to visit the composite's hierarchy.
+Id Builder::createCompositeCompare(Decoration precision, Id value1, Id value2, bool equal)
+{
+ Id boolType = makeBoolType();
+ Id valueType = getTypeId(value1);
+
+ Id resultId = NoResult;
+
+ int numConstituents = getNumTypeConstituents(valueType);
+
+ // Scalars and Vectors
+
+ if (isScalarType(valueType) || isVectorType(valueType)) {
+ assert(valueType == getTypeId(value2));
+        // These just need a single comparison; we just have
+        // to figure out which one it is.
+ Op op;
+ switch (getMostBasicTypeClass(valueType)) {
+ case OpTypeFloat:
+ op = equal ? OpFOrdEqual : OpFOrdNotEqual;
+ break;
+ case OpTypeInt:
+ default:
+ op = equal ? OpIEqual : OpINotEqual;
+ break;
+ case OpTypeBool:
+ op = equal ? OpLogicalEqual : OpLogicalNotEqual;
+ precision = NoPrecision;
+ break;
+ }
+
+ if (isScalarType(valueType)) {
+ // scalar
+ resultId = createBinOp(op, boolType, value1, value2);
+ } else {
+ // vector
+ resultId = createBinOp(op, makeVectorType(boolType, numConstituents), value1, value2);
+ setPrecision(resultId, precision);
+ // reduce vector compares...
+ resultId = createUnaryOp(equal ? OpAll : OpAny, boolType, resultId);
+ }
+
+ return setPrecision(resultId, precision);
+ }
+
+ // Only structs, arrays, and matrices should be left.
+ // They share in common the reduction operation across their constituents.
+ assert(isAggregateType(valueType) || isMatrixType(valueType));
+
+ // Compare each pair of constituents
+ for (int constituent = 0; constituent < numConstituents; ++constituent) {
+ std::vector<unsigned> indexes(1, constituent);
+ Id constituentType1 = getContainedTypeId(getTypeId(value1), constituent);
+ Id constituentType2 = getContainedTypeId(getTypeId(value2), constituent);
+ Id constituent1 = createCompositeExtract(value1, constituentType1, indexes);
+ Id constituent2 = createCompositeExtract(value2, constituentType2, indexes);
+
+ Id subResultId = createCompositeCompare(precision, constituent1, constituent2, equal);
+
+ if (constituent == 0)
+ resultId = subResultId;
+ else
+ resultId = setPrecision(createBinOp(equal ? OpLogicalAnd : OpLogicalOr, boolType, resultId, subResultId), precision);
+ }
+
+ return resultId;
+}
+
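+// Illustrative note (exposition only, not upstream code): comparing two mat2
+// values for equality recurses per column; each column compares with OpFOrdEqual
+// reduced by OpAll, and the two column results combine with OpLogicalAnd.
+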
+// OpCompositeConstruct
+Id Builder::createCompositeConstruct(Id typeId, const std::vector<Id>& constituents)
+{
+ assert(isAggregateType(typeId) || (getNumTypeConstituents(typeId) > 1 && getNumTypeConstituents(typeId) == (int)constituents.size()));
+
+ if (generatingOpCodeForSpecConst) {
+        // Sometimes, even in spec-constant-op mode, the constant composite to be
+ // constructed may not be a specialization constant.
+ // e.g.:
+ // const mat2 m2 = mat2(a_spec_const, a_front_end_const, another_front_end_const, third_front_end_const);
+ // The first column vector should be a spec constant one, as a_spec_const is a spec constant.
+ // The second column vector should NOT be spec constant, as it does not contain any spec constants.
+ // To handle such cases, we check the constituents of the constant vector to determine whether this
+ // vector should be created as a spec constant.
+ return makeCompositeConstant(typeId, constituents,
+ std::any_of(constituents.begin(), constituents.end(),
+ [&](spv::Id id) { return isSpecConstant(id); }));
+ }
+
+ Instruction* op = new Instruction(getUniqueId(), typeId, OpCompositeConstruct);
+ for (int c = 0; c < (int)constituents.size(); ++c)
+ op->addIdOperand(constituents[c]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+// Vector or scalar constructor
+Id Builder::createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
+{
+ Id result = NoResult;
+ unsigned int numTargetComponents = getNumTypeComponents(resultTypeId);
+ unsigned int targetComponent = 0;
+
+ // Special case: when calling a vector constructor with a single scalar
+ // argument, smear the scalar
+ if (sources.size() == 1 && isScalar(sources[0]) && numTargetComponents > 1)
+ return smearScalar(precision, sources[0], resultTypeId);
+
+ // accumulate the arguments for OpCompositeConstruct
+ std::vector<Id> constituents;
+ Id scalarTypeId = getScalarTypeId(resultTypeId);
+
+ // lambda to store the result of visiting an argument component
+ const auto latchResult = [&](Id comp) {
+ if (numTargetComponents > 1)
+ constituents.push_back(comp);
+ else
+ result = comp;
+ ++targetComponent;
+ };
+
+ // lambda to visit a vector argument's components
+ const auto accumulateVectorConstituents = [&](Id sourceArg) {
+ unsigned int sourceSize = getNumComponents(sourceArg);
+ unsigned int sourcesToUse = sourceSize;
+ if (sourcesToUse + targetComponent > numTargetComponents)
+ sourcesToUse = numTargetComponents - targetComponent;
+
+ for (unsigned int s = 0; s < sourcesToUse; ++s) {
+ std::vector<unsigned> swiz;
+ swiz.push_back(s);
+ latchResult(createRvalueSwizzle(precision, scalarTypeId, sourceArg, swiz));
+ }
+ };
+
+ // lambda to visit a matrix argument's components
+ const auto accumulateMatrixConstituents = [&](Id sourceArg) {
+ unsigned int sourceSize = getNumColumns(sourceArg) * getNumRows(sourceArg);
+ unsigned int sourcesToUse = sourceSize;
+ if (sourcesToUse + targetComponent > numTargetComponents)
+ sourcesToUse = numTargetComponents - targetComponent;
+
+ int col = 0;
+ int row = 0;
+ for (unsigned int s = 0; s < sourcesToUse; ++s) {
+ if (row >= getNumRows(sourceArg)) {
+ row = 0;
+ col++;
+ }
+ std::vector<Id> indexes;
+ indexes.push_back(col);
+ indexes.push_back(row);
+ latchResult(createCompositeExtract(sourceArg, scalarTypeId, indexes));
+ row++;
+ }
+ };
+
+    // Go through the source arguments; each one could contribute either
+    // a single component or multiple components.
+ for (unsigned int i = 0; i < sources.size(); ++i) {
+
+ if (isScalar(sources[i]) || isPointer(sources[i]))
+ latchResult(sources[i]);
+ else if (isVector(sources[i]))
+ accumulateVectorConstituents(sources[i]);
+ else if (isMatrix(sources[i]))
+ accumulateMatrixConstituents(sources[i]);
+ else
+ assert(0);
+
+ if (targetComponent >= numTargetComponents)
+ break;
+ }
+
+ // If the result is a vector, make it from the gathered constituents.
+ if (constituents.size() > 0)
+ result = createCompositeConstruct(resultTypeId, constituents);
+
+ return setPrecision(result, precision);
+}
+
+// Comments in header
+Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
+{
+ Id componentTypeId = getScalarTypeId(resultTypeId);
+ int numCols = getTypeNumColumns(resultTypeId);
+ int numRows = getTypeNumRows(resultTypeId);
+
+ Instruction* instr = module.getInstruction(componentTypeId);
+ unsigned bitCount = instr->getImmediateOperand(0);
+
+ // Optimize matrix constructed from a bigger matrix
+ if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
+ // To truncate the matrix to a smaller number of rows/columns, we need to:
+ // 1. For each column, extract the column and truncate it to the required size using shuffle
+ // 2. Assemble the resulting matrix from all columns
+ Id matrix = sources[0];
+ Id columnTypeId = getContainedTypeId(resultTypeId);
+ Id sourceColumnTypeId = getContainedTypeId(getTypeId(matrix));
+
+ std::vector<unsigned> channels;
+ for (int row = 0; row < numRows; ++row)
+ channels.push_back(row);
+
+ std::vector<Id> matrixColumns;
+ for (int col = 0; col < numCols; ++col) {
+ std::vector<unsigned> indexes;
+ indexes.push_back(col);
+ Id colv = createCompositeExtract(matrix, sourceColumnTypeId, indexes);
+ setPrecision(colv, precision);
+
+ if (numRows != getNumRows(matrix)) {
+ matrixColumns.push_back(createRvalueSwizzle(precision, columnTypeId, colv, channels));
+ } else {
+ matrixColumns.push_back(colv);
+ }
+ }
+
+ return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
+ }
+
+    // Otherwise, use a two-step process:
+ // 1. make a compile-time 2D array of values
+ // 2. construct a matrix from that array
+
+ // Step 1.
+
+ // initialize the array to the identity matrix
+ Id ids[maxMatrixSize][maxMatrixSize];
+ Id one = (bitCount == 64 ? makeDoubleConstant(1.0) : makeFloatConstant(1.0));
+ Id zero = (bitCount == 64 ? makeDoubleConstant(0.0) : makeFloatConstant(0.0));
+ for (int col = 0; col < 4; ++col) {
+ for (int row = 0; row < 4; ++row) {
+ if (col == row)
+ ids[col][row] = one;
+ else
+ ids[col][row] = zero;
+ }
+ }
+
+ // modify components as dictated by the arguments
+ if (sources.size() == 1 && isScalar(sources[0])) {
+ // a single scalar; resets the diagonals
+ for (int col = 0; col < 4; ++col)
+ ids[col][col] = sources[0];
+ } else if (isMatrix(sources[0])) {
+ // constructing from another matrix; copy over the parts that exist in both the argument and constructee
+ Id matrix = sources[0];
+ int minCols = std::min(numCols, getNumColumns(matrix));
+ int minRows = std::min(numRows, getNumRows(matrix));
+ for (int col = 0; col < minCols; ++col) {
+ std::vector<unsigned> indexes;
+ indexes.push_back(col);
+ for (int row = 0; row < minRows; ++row) {
+ indexes.push_back(row);
+ ids[col][row] = createCompositeExtract(matrix, componentTypeId, indexes);
+ indexes.pop_back();
+ setPrecision(ids[col][row], precision);
+ }
+ }
+ } else {
+ // fill in the matrix in column-major order with whatever argument components are available
+ int row = 0;
+ int col = 0;
+
+ for (int arg = 0; arg < (int)sources.size(); ++arg) {
+ Id argComp = sources[arg];
+ for (int comp = 0; comp < getNumComponents(sources[arg]); ++comp) {
+ if (getNumComponents(sources[arg]) > 1) {
+ argComp = createCompositeExtract(sources[arg], componentTypeId, comp);
+ setPrecision(argComp, precision);
+ }
+ ids[col][row++] = argComp;
+ if (row == numRows) {
+ row = 0;
+ col++;
+ }
+ }
+ }
+ }
+
+ // Step 2: Construct a matrix from that array.
+ // First make the column vectors, then make the matrix.
+
+ // make the column vectors
+ Id columnTypeId = getContainedTypeId(resultTypeId);
+ std::vector<Id> matrixColumns;
+ for (int col = 0; col < numCols; ++col) {
+ std::vector<Id> vectorComponents;
+ for (int row = 0; row < numRows; ++row)
+ vectorComponents.push_back(ids[col][row]);
+ Id column = createCompositeConstruct(columnTypeId, vectorComponents);
+ setPrecision(column, precision);
+ matrixColumns.push_back(column);
+ }
+
+ // make the matrix
+ return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
+}
+
+// Comments in header
+Builder::If::If(Id cond, unsigned int ctrl, Builder& gb) :
+ builder(gb),
+ condition(cond),
+ control(ctrl),
+ elseBlock(0)
+{
+ function = &builder.getBuildPoint()->getParent();
+
+    // make the blocks, but only put the then-block into the function;
+    // the else-block and merge-block will be added later, in order, after
+    // earlier code is emitted
+ thenBlock = new Block(builder.getUniqueId(), *function);
+ mergeBlock = new Block(builder.getUniqueId(), *function);
+
+ // Save the current block, so that we can add in the flow control split when
+ // makeEndIf is called.
+ headerBlock = builder.getBuildPoint();
+
+ function->addBlock(thenBlock);
+ builder.setBuildPoint(thenBlock);
+}
+
+// Comments in header
+void Builder::If::makeBeginElse()
+{
+ // Close out the "then" by having it jump to the mergeBlock
+ builder.createBranch(mergeBlock);
+
+ // Make the first else block and add it to the function
+ elseBlock = new Block(builder.getUniqueId(), *function);
+ function->addBlock(elseBlock);
+
+ // Start building the else block
+ builder.setBuildPoint(elseBlock);
+}
+
+// Comments in header
+void Builder::If::makeEndIf()
+{
+ // jump to the merge block
+ builder.createBranch(mergeBlock);
+
+ // Go back to the headerBlock and make the flow control split
+ builder.setBuildPoint(headerBlock);
+ builder.createSelectionMerge(mergeBlock, control);
+ if (elseBlock)
+ builder.createConditionalBranch(condition, thenBlock, elseBlock);
+ else
+ builder.createConditionalBranch(condition, thenBlock, mergeBlock);
+
+ // add the merge block to the function
+ function->addBlock(mergeBlock);
+ builder.setBuildPoint(mergeBlock);
+}
+
+// Comments in header
+void Builder::makeSwitch(Id selector, unsigned int control, int numSegments, const std::vector<int>& caseValues,
+ const std::vector<int>& valueIndexToSegment, int defaultSegment,
+ std::vector<Block*>& segmentBlocks)
+{
+ Function& function = buildPoint->getParent();
+
+ // make all the blocks
+ for (int s = 0; s < numSegments; ++s)
+ segmentBlocks.push_back(new Block(getUniqueId(), function));
+
+ Block* mergeBlock = new Block(getUniqueId(), function);
+
+ // make and insert the switch's selection-merge instruction
+ createSelectionMerge(mergeBlock, control);
+
+ // make the switch instruction
+ Instruction* switchInst = new Instruction(NoResult, NoType, OpSwitch);
+ switchInst->addIdOperand(selector);
+ auto defaultOrMerge = (defaultSegment >= 0) ? segmentBlocks[defaultSegment] : mergeBlock;
+ switchInst->addIdOperand(defaultOrMerge->getId());
+ defaultOrMerge->addPredecessor(buildPoint);
+ for (int i = 0; i < (int)caseValues.size(); ++i) {
+ switchInst->addImmediateOperand(caseValues[i]);
+ switchInst->addIdOperand(segmentBlocks[valueIndexToSegment[i]]->getId());
+ segmentBlocks[valueIndexToSegment[i]]->addPredecessor(buildPoint);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(switchInst));
+
+ // push the merge block
+ switchMerges.push(mergeBlock);
+}
+
+// Comments in header
+void Builder::addSwitchBreak()
+{
+ // branch to the top of the merge block stack
+ createBranch(switchMerges.top());
+ createAndSetNoPredecessorBlock("post-switch-break");
+}
+
+// Comments in header
+void Builder::nextSwitchSegment(std::vector<Block*>& segmentBlock, int nextSegment)
+{
+ int lastSegment = nextSegment - 1;
+ if (lastSegment >= 0) {
+ // Close out previous segment by jumping, if necessary, to next segment
+ if (! buildPoint->isTerminated())
+ createBranch(segmentBlock[nextSegment]);
+ }
+ Block* block = segmentBlock[nextSegment];
+ block->getParent().addBlock(block);
+ setBuildPoint(block);
+}
+
+// Comments in header
+void Builder::endSwitch(std::vector<Block*>& /*segmentBlock*/)
+{
+    // Close out the final segment by branching, if necessary, to the merge block
+ if (! buildPoint->isTerminated())
+ addSwitchBreak();
+
+ switchMerges.top()->getParent().addBlock(switchMerges.top());
+ setBuildPoint(switchMerges.top());
+
+ switchMerges.pop();
+}
+
+Block& Builder::makeNewBlock()
+{
+ Function& function = buildPoint->getParent();
+ auto block = new Block(getUniqueId(), function);
+ function.addBlock(block);
+ return *block;
+}
+
+Builder::LoopBlocks& Builder::makeNewLoop()
+{
+ // This verbosity is needed to simultaneously get the same behavior
+ // everywhere (id's in the same order), have a syntax that works
+ // across lots of versions of C++, have no warnings from pedantic
+ // compilation modes, and leave the rest of the code alone.
+ Block& head = makeNewBlock();
+ Block& body = makeNewBlock();
+ Block& merge = makeNewBlock();
+ Block& continue_target = makeNewBlock();
+ LoopBlocks blocks(head, body, merge, continue_target);
+ loops.push(blocks);
+ return loops.top();
+}
+
+void Builder::createLoopContinue()
+{
+ createBranch(&loops.top().continue_target);
+ // Set up a block for dead code.
+ createAndSetNoPredecessorBlock("post-loop-continue");
+}
+
+void Builder::createLoopExit()
+{
+ createBranch(&loops.top().merge);
+ // Set up a block for dead code.
+ createAndSetNoPredecessorBlock("post-loop-break");
+}
+
+void Builder::closeLoop()
+{
+ loops.pop();
+}
+
+void Builder::clearAccessChain()
+{
+ accessChain.base = NoResult;
+ accessChain.indexChain.clear();
+ accessChain.instr = NoResult;
+ accessChain.swizzle.clear();
+ accessChain.component = NoResult;
+ accessChain.preSwizzleBaseType = NoType;
+ accessChain.isRValue = false;
+ accessChain.coherentFlags.clear();
+ accessChain.alignment = 0;
+}
+
+// Comments in header
+void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+{
+ accessChain.coherentFlags |= coherentFlags;
+ accessChain.alignment |= alignment;
+
+ // swizzles can be stacked in GLSL, but simplified to a single
+ // one here; the base type doesn't change
+ if (accessChain.preSwizzleBaseType == NoType)
+ accessChain.preSwizzleBaseType = preSwizzleBaseType;
+
+ // if needed, propagate the swizzle for the current access chain
+ if (accessChain.swizzle.size() > 0) {
+ std::vector<unsigned> oldSwizzle = accessChain.swizzle;
+ accessChain.swizzle.resize(0);
+ for (unsigned int i = 0; i < swizzle.size(); ++i) {
+ assert(swizzle[i] < oldSwizzle.size());
+ accessChain.swizzle.push_back(oldSwizzle[swizzle[i]]);
+ }
+ } else
+ accessChain.swizzle = swizzle;
+
+ // determine if we need to track this swizzle anymore
+ simplifyAccessChainSwizzle();
+}
+
+// Comments in header
+void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ assert(accessChain.isRValue == false);
+
+ transferAccessChainSwizzle(true);
+ Id base = collapseAccessChain();
+ Id source = rvalue;
+
+ // dynamic component should be gone
+ assert(accessChain.component == NoResult);
+
+    // If a swizzle still exists, it is out-of-order or incomplete; we must load the target
+    // vector, then extract and insert elements to apply the write mask and/or swizzle.
+ if (accessChain.swizzle.size() > 0) {
+ Id tempBaseId = createLoad(base);
+ source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle);
+ }
+
+    // keep only the least-significant set bit of alignment (the strictest power-of-two guarantee)
+ alignment = alignment & ~(alignment & (alignment-1));
+ if (getStorageClass(base) == StorageClassPhysicalStorageBufferEXT) {
+ memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
+ }
+
+ createStore(source, base, memoryAccess, scope, alignment);
+}
+
+// Comments in header
+Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ Id id;
+
+ if (accessChain.isRValue) {
+ // transfer access chain, but try to stay in registers
+ transferAccessChainSwizzle(false);
+ if (accessChain.indexChain.size() > 0) {
+ Id swizzleBase = accessChain.preSwizzleBaseType != NoType ? accessChain.preSwizzleBaseType : resultType;
+
+ // if all the accesses are constants, we can use OpCompositeExtract
+ std::vector<unsigned> indexes;
+ bool constant = true;
+ for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) {
+ if (isConstantScalar(accessChain.indexChain[i]))
+ indexes.push_back(getConstantScalar(accessChain.indexChain[i]));
+ else {
+ constant = false;
+ break;
+ }
+ }
+
+ if (constant) {
+ id = createCompositeExtract(accessChain.base, swizzleBase, indexes);
+ } else {
+ // make a new function variable for this r-value
+ Id lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
+
+ // store into it
+ createStore(accessChain.base, lValue);
+
+ // move base to the new variable
+ accessChain.base = lValue;
+ accessChain.isRValue = false;
+
+ // load through the access chain
+ id = createLoad(collapseAccessChain());
+ }
+ setPrecision(id, precision);
+ } else
+ id = accessChain.base; // no precision, it was set when this was defined
+ } else {
+ transferAccessChainSwizzle(true);
+
+        // keep only the least-significant set bit of alignment (the strictest power-of-two guarantee)
+ alignment = alignment & ~(alignment & (alignment-1));
+ if (getStorageClass(accessChain.base) == StorageClassPhysicalStorageBufferEXT) {
+ memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
+ }
+
+ // load through the access chain
+ id = createLoad(collapseAccessChain(), memoryAccess, scope, alignment);
+ setPrecision(id, precision);
+ addDecoration(id, nonUniform);
+ }
+
+ // Done, unless there are swizzles to do
+ if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+ return id;
+
+ // Do remaining swizzling
+
+ // Do the basic swizzle
+ if (accessChain.swizzle.size() > 0) {
+ Id swizzledType = getScalarTypeId(getTypeId(id));
+ if (accessChain.swizzle.size() > 1)
+ swizzledType = makeVectorType(swizzledType, (int)accessChain.swizzle.size());
+ id = createRvalueSwizzle(precision, swizzledType, id, accessChain.swizzle);
+ }
+
+ // Do the dynamic component
+ if (accessChain.component != NoResult)
+ id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision);
+
+ addDecoration(id, nonUniform);
+ return id;
+}
+
+Id Builder::accessChainGetLValue()
+{
+ assert(accessChain.isRValue == false);
+
+ transferAccessChainSwizzle(true);
+ Id lvalue = collapseAccessChain();
+
+    // If a swizzle still existed, it would be out-of-order or incomplete, requiring a load
+    // of the target vector plus element extracts and inserts to apply the write mask and/or
+    // swizzle. That does not mix with returning a direct l-value pointer, hence the asserts.
+ assert(accessChain.swizzle.size() == 0);
+ assert(accessChain.component == NoResult);
+
+ return lvalue;
+}
+
+// Comments in header
+Id Builder::accessChainGetInferredType()
+{
+ // anything to operate on?
+ if (accessChain.base == NoResult)
+ return NoType;
+ Id type = getTypeId(accessChain.base);
+
+ // do initial dereference
+ if (! accessChain.isRValue)
+ type = getContainedTypeId(type);
+
+ // dereference each index
+ for (auto it = accessChain.indexChain.cbegin(); it != accessChain.indexChain.cend(); ++it) {
+ if (isStructType(type))
+ type = getContainedTypeId(type, getConstantScalar(*it));
+ else
+ type = getContainedTypeId(type);
+ }
+
+ // dereference swizzle
+ if (accessChain.swizzle.size() == 1)
+ type = getContainedTypeId(type);
+ else if (accessChain.swizzle.size() > 1)
+ type = makeVectorType(getContainedTypeId(type), (int)accessChain.swizzle.size());
+
+ // dereference component selection
+ if (accessChain.component)
+ type = getContainedTypeId(type);
+
+ return type;
+}
+
+void Builder::dump(std::vector<unsigned int>& out) const
+{
+ // Header, before first instructions:
+ out.push_back(MagicNumber);
+ out.push_back(spvVersion);
+ out.push_back(builderNumber);
+ out.push_back(uniqueId + 1);
+ out.push_back(0);
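+    // (These five words are the fixed SPIR-V module header: the magic number,
+    // the SPIR-V version, the generator's magic number, the id bound (every id
+    // used in the module is smaller than this), and a reserved 0.)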
+
+ // Capabilities
+ for (auto it = capabilities.cbegin(); it != capabilities.cend(); ++it) {
+ Instruction capInst(0, 0, OpCapability);
+ capInst.addImmediateOperand(*it);
+ capInst.dump(out);
+ }
+
+ for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) {
+ Instruction extInst(0, 0, OpExtension);
+ extInst.addStringOperand(it->c_str());
+ extInst.dump(out);
+ }
+
+ dumpInstructions(out, imports);
+ Instruction memInst(0, 0, OpMemoryModel);
+ memInst.addImmediateOperand(addressModel);
+ memInst.addImmediateOperand(memoryModel);
+ memInst.dump(out);
+
+ // Instructions saved up while building:
+ dumpInstructions(out, entryPoints);
+ dumpInstructions(out, executionModes);
+
+ // Debug instructions
+ dumpInstructions(out, strings);
+ dumpSourceInstructions(out);
+ for (int e = 0; e < (int)sourceExtensions.size(); ++e) {
+ Instruction sourceExtInst(0, 0, OpSourceExtension);
+ sourceExtInst.addStringOperand(sourceExtensions[e]);
+ sourceExtInst.dump(out);
+ }
+ dumpInstructions(out, names);
+ dumpModuleProcesses(out);
+
+ // Annotation instructions
+ dumpInstructions(out, decorations);
+
+ dumpInstructions(out, constantsTypesGlobals);
+ dumpInstructions(out, externals);
+
+ // The functions
+ module.dump(out);
+}
+
+//
+// Protected methods.
+//
+
+// Turn the described access chain in 'accessChain' into an instruction(s)
+// computing its address. This *cannot* include complex swizzles, which must
+// be handled after this is called.
+//
+// Can generate code.
+Id Builder::collapseAccessChain()
+{
+ assert(accessChain.isRValue == false);
+
+ // did we already emit an access chain for this?
+ if (accessChain.instr != NoResult)
+ return accessChain.instr;
+
+ // If we have a dynamic component, we can still transfer
+ // that into a final operand to the access chain. We need to remap the
+ // dynamic component through the swizzle to get a new dynamic component to
+ // update.
+ //
+ // This was not done in transferAccessChainSwizzle() because it might
+ // generate code.
+ remapDynamicSwizzle();
+ if (accessChain.component != NoResult) {
+ // transfer the dynamic component to the access chain
+ accessChain.indexChain.push_back(accessChain.component);
+ accessChain.component = NoResult;
+ }
+
+ // note that non-trivial swizzling is left pending
+
+ // do we have an access chain?
+ if (accessChain.indexChain.size() == 0)
+ return accessChain.base;
+
+ // emit the access chain
+ StorageClass storageClass = (StorageClass)module.getStorageClass(getTypeId(accessChain.base));
+ accessChain.instr = createAccessChain(storageClass, accessChain.base, accessChain.indexChain);
+
+ return accessChain.instr;
+}
+
+// For a dynamic component selection of a swizzle.
+//
+// Turn the swizzle and dynamic component into just a dynamic component.
+//
+// Generates code.
+void Builder::remapDynamicSwizzle()
+{
+ // do we have a swizzle to remap a dynamic component through?
+ if (accessChain.component != NoResult && accessChain.swizzle.size() > 1) {
+ // build a vector of the swizzle for the component to map into
+ std::vector<Id> components;
+ for (int c = 0; c < (int)accessChain.swizzle.size(); ++c)
+ components.push_back(makeUintConstant(accessChain.swizzle[c]));
+ Id mapType = makeVectorType(makeUintType(32), (int)accessChain.swizzle.size());
+ Id map = makeCompositeConstant(mapType, components);
+
+ // use it
+ accessChain.component = createVectorExtractDynamic(map, makeUintType(32), accessChain.component);
+ accessChain.swizzle.clear();
+ }
+}
+
+// Clear out the swizzle if it is redundant, that is, if it reselects the same components
+// that would be present without the swizzle.
+void Builder::simplifyAccessChainSwizzle()
+{
+ // If the swizzle has fewer components than the vector, it is subsetting, and must stay
+ // to preserve that fact.
+ if (getNumTypeComponents(accessChain.preSwizzleBaseType) > (int)accessChain.swizzle.size())
+ return;
+
+ // if components are out of order, it is a swizzle
+ for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) {
+ if (i != accessChain.swizzle[i])
+ return;
+ }
+
+ // otherwise, there is no need to track this swizzle
+ accessChain.swizzle.clear();
+ if (accessChain.component == NoResult)
+ accessChain.preSwizzleBaseType = NoType;
+}
+
+// To the extent any swizzling can become part of the chain
+// of accesses instead of a post operation, make it so.
+// If 'dynamic' is true, include transferring the dynamic component,
+// otherwise, leave it pending.
+//
+// Does not generate code; just updates the access chain.
+void Builder::transferAccessChainSwizzle(bool dynamic)
+{
+    // nonexistent?
+ if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+ return;
+
+ // too complex?
+ // (this requires either a swizzle, or generating code for a dynamic component)
+ if (accessChain.swizzle.size() > 1)
+ return;
+
+ // single component, either in the swizzle and/or dynamic component
+ if (accessChain.swizzle.size() == 1) {
+ assert(accessChain.component == NoResult);
+ // handle static component selection
+ accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle.front()));
+ accessChain.swizzle.clear();
+ accessChain.preSwizzleBaseType = NoType;
+ } else if (dynamic && accessChain.component != NoResult) {
+ assert(accessChain.swizzle.size() == 0);
+ // handle dynamic component
+ accessChain.indexChain.push_back(accessChain.component);
+ accessChain.preSwizzleBaseType = NoType;
+ accessChain.component = NoResult;
+ }
+}
+
+// Utility method for creating a new block and setting the insert point to
+// be in it. This is useful for flow-control operations that need a "dummy"
+// block following them (e.g. the unreachable instructions after a discard).
+void Builder::createAndSetNoPredecessorBlock(const char* /*name*/)
+{
+ Block* block = new Block(getUniqueId(), buildPoint->getParent());
+ block->setUnreachable();
+ buildPoint->getParent().addBlock(block);
+ setBuildPoint(block);
+
+ // if (name)
+ // addName(block->getId(), name);
+}
+
+// Comments in header
+void Builder::createBranch(Block* block)
+{
+ Instruction* branch = new Instruction(OpBranch);
+ branch->addIdOperand(block->getId());
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(branch));
+ block->addPredecessor(buildPoint);
+}
+
+void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control)
+{
+ Instruction* merge = new Instruction(OpSelectionMerge);
+ merge->addIdOperand(mergeBlock->getId());
+ merge->addImmediateOperand(control);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
+}
+
+void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control,
+ unsigned int dependencyLength)
+{
+ Instruction* merge = new Instruction(OpLoopMerge);
+ merge->addIdOperand(mergeBlock->getId());
+ merge->addIdOperand(continueBlock->getId());
+ merge->addImmediateOperand(control);
+ if ((control & LoopControlDependencyLengthMask) != 0)
+ merge->addImmediateOperand(dependencyLength);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
+}
+
+void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock)
+{
+ Instruction* branch = new Instruction(OpBranchConditional);
+ branch->addIdOperand(condition);
+ branch->addIdOperand(thenBlock->getId());
+ branch->addIdOperand(elseBlock->getId());
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(branch));
+ thenBlock->addPredecessor(buildPoint);
+ elseBlock->addPredecessor(buildPoint);
+}
+
+// OpSource
+// [OpSourceContinued]
+// ...
+void Builder::dumpSourceInstructions(const spv::Id fileId, const std::string& text,
+ std::vector<unsigned int>& out) const
+{
+ const int maxWordCount = 0xFFFF;
+ const int opSourceWordCount = 4;
+ const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1;
+
+ if (source != SourceLanguageUnknown) {
+ // OpSource Language Version File Source
+ Instruction sourceInst(NoResult, NoType, OpSource);
+ sourceInst.addImmediateOperand(source);
+ sourceInst.addImmediateOperand(sourceVersion);
+ // File operand
+ if (fileId != NoResult) {
+ sourceInst.addIdOperand(fileId);
+ // Source operand
+ if (text.size() > 0) {
+ int nextByte = 0;
+ std::string subString;
+ while ((int)text.size() - nextByte > 0) {
+ subString = text.substr(nextByte, nonNullBytesPerInstruction);
+ if (nextByte == 0) {
+ // OpSource
+ sourceInst.addStringOperand(subString.c_str());
+ sourceInst.dump(out);
+ } else {
+                    // OpSourceContinued
+ Instruction sourceContinuedInst(OpSourceContinued);
+ sourceContinuedInst.addStringOperand(subString.c_str());
+ sourceContinuedInst.dump(out);
+ }
+ nextByte += nonNullBytesPerInstruction;
+ }
+ } else
+ sourceInst.dump(out);
+ } else
+ sourceInst.dump(out);
+ }
+}
+
+// Dump an OpSource[Continued] sequence for the source and every include file
+void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
+{
+ dumpSourceInstructions(sourceFileStringId, sourceText, out);
+ for (auto iItr = includeFiles.begin(); iItr != includeFiles.end(); ++iItr)
+ dumpSourceInstructions(iItr->first, *iItr->second, out);
+}
+
+void Builder::dumpInstructions(std::vector<unsigned int>& out, const std::vector<std::unique_ptr<Instruction> >& instructions) const
+{
+ for (int i = 0; i < (int)instructions.size(); ++i) {
+ instructions[i]->dump(out);
+ }
+}
+
+void Builder::dumpModuleProcesses(std::vector<unsigned int>& out) const
+{
+ for (int i = 0; i < (int)moduleProcesses.size(); ++i) {
+ Instruction moduleProcessed(OpModuleProcessed);
+ moduleProcessed.addStringOperand(moduleProcesses[i]);
+ moduleProcessed.dump(out);
+ }
+}
+
+}; // end spv namespace
diff --git a/src/3rdparty/glslang/SPIRV/SpvBuilder.h b/src/3rdparty/glslang/SPIRV/SpvBuilder.h
new file mode 100644
index 0000000..52f7fba
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SpvBuilder.h
@@ -0,0 +1,749 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// "Builder" is an interface to fully build SPIR-V IR. Allocate one of
+// these to build (a thread-safe) internal SPIR-V representation (IR),
+// and then dump it as a binary stream according to the SPIR-V specification.
+//
+// A Builder has a 1:1 relationship with a SPIR-V module.
+//
+
+#pragma once
+#ifndef SpvBuilder_H
+#define SpvBuilder_H
+
+#include "Logger.h"
+#include "spirv.hpp"
+#include "spvIR.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <stack>
+#include <unordered_map>
+
+namespace spv {
+
+class Builder {
+public:
+ Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
+ virtual ~Builder();
+
+ static const int maxMatrixSize = 4;
+
+ unsigned int getSpvVersion() const { return spvVersion; }
+
+ void setSource(spv::SourceLanguage lang, int version)
+ {
+ source = lang;
+ sourceVersion = version;
+ }
+ spv::Id getStringId(const std::string& str)
+ {
+ auto sItr = stringIds.find(str);
+ if (sItr != stringIds.end())
+ return sItr->second;
+ spv::Id strId = getUniqueId();
+ Instruction* fileString = new Instruction(strId, NoType, OpString);
+ const char* file_c_str = str.c_str();
+ fileString->addStringOperand(file_c_str);
+ strings.push_back(std::unique_ptr<Instruction>(fileString));
+ stringIds[file_c_str] = strId;
+ return strId;
+ }
+ void setSourceFile(const std::string& file)
+ {
+ sourceFileStringId = getStringId(file);
+ }
+ void setSourceText(const std::string& text) { sourceText = text; }
+ void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
+ void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
+ void setEmitOpLines() { emitOpLines = true; }
+ void addExtension(const char* ext) { extensions.insert(ext); }
+ void addInclude(const std::string& name, const std::string& text)
+ {
+ spv::Id incId = getStringId(name);
+ includeFiles[incId] = &text;
+ }
+ Id import(const char*);
+ void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
+ {
+ addressModel = addr;
+ memoryModel = mem;
+ }
+
+ void addCapability(spv::Capability cap) { capabilities.insert(cap); }
+
+ // To get a new <id> for anything needing a new one.
+ Id getUniqueId() { return ++uniqueId; }
+
+ // To get a set of new <id>s, e.g., for a set of function parameters
+ Id getUniqueIds(int numIds)
+ {
+ Id id = uniqueId + 1;
+ uniqueId += numIds;
+ return id;
+ }
+
+    // Generate OpLine for non-filename-based #line directives (i.e. no filename
+ // seen yet): Log the current line, and if different than the last one,
+ // issue a new OpLine using the new line and current source file name.
+ void setLine(int line);
+
+ // If filename null, generate OpLine for non-filename-based line directives,
+ // else do filename-based: Log the current line and file, and if different
+ // than the last one, issue a new OpLine using the new line and file
+ // name.
+ void setLine(int line, const char* filename);
+ // Low-level OpLine. See setLine() for a layered helper.
+ void addLine(Id fileName, int line, int column);
+
+ // For creating new types (will return old type if the requested one was already made).
+ Id makeVoidType();
+ Id makeBoolType();
+ Id makePointer(StorageClass, Id pointee);
+ Id makeForwardPointer(StorageClass);
+ Id makePointerFromForwardPointer(StorageClass, Id forwardPointerType, Id pointee);
+ Id makeIntegerType(int width, bool hasSign); // generic
+ Id makeIntType(int width) { return makeIntegerType(width, true); }
+ Id makeUintType(int width) { return makeIntegerType(width, false); }
+ Id makeFloatType(int width);
+ Id makeStructType(const std::vector<Id>& members, const char*);
+ Id makeStructResultType(Id type0, Id type1);
+ Id makeVectorType(Id component, int size);
+ Id makeMatrixType(Id component, int cols, int rows);
+ Id makeArrayType(Id element, Id sizeId, int stride); // 0 stride means no stride decoration
+ Id makeRuntimeArray(Id element);
+ Id makeFunctionType(Id returnType, const std::vector<Id>& paramTypes);
+ Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
+ Id makeSamplerType();
+ Id makeSampledImageType(Id imageType);
+ Id makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols);
+
+ // accelerationStructureNV type
+ Id makeAccelerationStructureNVType();
+
+ // For querying about types.
+ Id getTypeId(Id resultId) const { return module.getTypeId(resultId); }
+ Id getDerefTypeId(Id resultId) const;
+ Op getOpCode(Id id) const { return module.getInstruction(id)->getOpCode(); }
+ Op getTypeClass(Id typeId) const { return getOpCode(typeId); }
+ Op getMostBasicTypeClass(Id typeId) const;
+ int getNumComponents(Id resultId) const { return getNumTypeComponents(getTypeId(resultId)); }
+ int getNumTypeConstituents(Id typeId) const;
+ int getNumTypeComponents(Id typeId) const { return getNumTypeConstituents(typeId); }
+ Id getScalarTypeId(Id typeId) const;
+ Id getContainedTypeId(Id typeId) const;
+ Id getContainedTypeId(Id typeId, int) const;
+ StorageClass getTypeStorageClass(Id typeId) const { return module.getStorageClass(typeId); }
+ ImageFormat getImageTypeFormat(Id typeId) const { return (ImageFormat)module.getInstruction(typeId)->getImmediateOperand(6); }
+
+ bool isPointer(Id resultId) const { return isPointerType(getTypeId(resultId)); }
+ bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); }
+ bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); }
+ bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); }
+ bool isCooperativeMatrix(Id resultId)const { return isCooperativeMatrixType(getTypeId(resultId)); }
+ bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); }
+ bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
+
+ bool isBoolType(Id typeId) { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); }
+ bool isIntType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) != 0; }
+ bool isUintType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) == 0; }
+ bool isFloatType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat; }
+ bool isPointerType(Id typeId) const { return getTypeClass(typeId) == OpTypePointer; }
+ bool isScalarType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat || getTypeClass(typeId) == OpTypeInt || getTypeClass(typeId) == OpTypeBool; }
+ bool isVectorType(Id typeId) const { return getTypeClass(typeId) == OpTypeVector; }
+ bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
+ bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
+ bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
+ bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; }
+ bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); }
+ bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
+ bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
+ bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
+ bool containsType(Id typeId, Op typeOp, unsigned int width) const;
+ bool containsPhysicalStorageBufferOrArray(Id typeId) const;
+
+ bool isConstantOpCode(Op opcode) const;
+ bool isSpecConstantOpCode(Op opcode) const;
+ bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); }
+ bool isConstantScalar(Id resultId) const { return getOpCode(resultId) == OpConstant; }
+ bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); }
+ unsigned int getConstantScalar(Id resultId) const { return module.getInstruction(resultId)->getImmediateOperand(0); }
+ StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); }
+
+ int getScalarTypeWidth(Id typeId) const
+ {
+ Id scalarTypeId = getScalarTypeId(typeId);
+ assert(getTypeClass(scalarTypeId) == OpTypeInt || getTypeClass(scalarTypeId) == OpTypeFloat);
+ return module.getInstruction(scalarTypeId)->getImmediateOperand(0);
+ }
+
+ int getTypeNumColumns(Id typeId) const
+ {
+ assert(isMatrixType(typeId));
+ return getNumTypeConstituents(typeId);
+ }
+ int getNumColumns(Id resultId) const { return getTypeNumColumns(getTypeId(resultId)); }
+ int getTypeNumRows(Id typeId) const
+ {
+ assert(isMatrixType(typeId));
+ return getNumTypeComponents(getContainedTypeId(typeId));
+ }
+ int getNumRows(Id resultId) const { return getTypeNumRows(getTypeId(resultId)); }
+
+ Dim getTypeDimensionality(Id typeId) const
+ {
+ assert(isImageType(typeId));
+ return (Dim)module.getInstruction(typeId)->getImmediateOperand(1);
+ }
+ Id getImageType(Id resultId) const
+ {
+ Id typeId = getTypeId(resultId);
+ assert(isImageType(typeId) || isSampledImageType(typeId));
+ return isSampledImageType(typeId) ? module.getInstruction(typeId)->getIdOperand(0) : typeId;
+ }
+ bool isArrayedImageType(Id typeId) const
+ {
+ assert(isImageType(typeId));
+ return module.getInstruction(typeId)->getImmediateOperand(3) != 0;
+ }
+
+ // For making new constants (will return old constant if the requested one was already made).
+ Id makeBoolConstant(bool b, bool specConstant = false);
+ Id makeInt8Constant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(8), (unsigned)i, specConstant); }
+ Id makeUint8Constant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(8), u, specConstant); }
+ Id makeInt16Constant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(16), (unsigned)i, specConstant); }
+ Id makeUint16Constant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(16), u, specConstant); }
+ Id makeIntConstant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(32), (unsigned)i, specConstant); }
+ Id makeUintConstant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(32), u, specConstant); }
+ Id makeInt64Constant(long long i, bool specConstant = false) { return makeInt64Constant(makeIntType(64), (unsigned long long)i, specConstant); }
+ Id makeUint64Constant(unsigned long long u, bool specConstant = false) { return makeInt64Constant(makeUintType(64), u, specConstant); }
+ Id makeFloatConstant(float f, bool specConstant = false);
+ Id makeDoubleConstant(double d, bool specConstant = false);
+ Id makeFloat16Constant(float f16, bool specConstant = false);
+ Id makeFpConstant(Id type, double d, bool specConstant = false);
+
+ // Turn the array of constants into a proper spv constant of the requested type.
+ Id makeCompositeConstant(Id type, const std::vector<Id>& comps, bool specConst = false);
+
+ // Methods for adding information outside the CFG.
+ Instruction* addEntryPoint(ExecutionModel, Function*, const char* name);
+ void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1);
+ void addName(Id, const char* name);
+ void addMemberName(Id, int member, const char* name);
+ void addDecoration(Id, Decoration, int num = -1);
+ void addDecoration(Id, Decoration, const char*);
+ void addDecorationId(Id id, Decoration, Id idDecoration);
+ void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1);
+ void addMemberDecoration(Id, unsigned int member, Decoration, const char*);
+
+ // At the end of what block do the next create*() instructions go?
+ void setBuildPoint(Block* bp) { buildPoint = bp; }
+ Block* getBuildPoint() const { return buildPoint; }
+
+ // Make the entry-point function. The returned pointer is only valid
+ // for the lifetime of this builder.
+ Function* makeEntryPoint(const char*);
+
+ // Make a shader-style function, and create its entry block if entry is non-zero.
+ // Return the function, pass back the entry.
+ // The returned pointer is only valid for the lifetime of this builder.
+ Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name, const std::vector<Id>& paramTypes,
+ const std::vector<std::vector<Decoration>>& precisions, Block **entry = 0);
+
+ // Create a return. An 'implicit' return is one not appearing in the source
+ // code. In the case of an implicit return, no post-return block is inserted.
+ void makeReturn(bool implicit, Id retVal = 0);
+
+ // Generate all the code needed to finish up a function.
+ void leaveFunction();
+
+ // Create a discard.
+ void makeDiscard();
+
+ // Create a global or function local or IO variable.
+ Id createVariable(StorageClass, Id type, const char* name = 0);
+
+ // Create an intermediate with an undefined value.
+ Id createUndefined(Id type);
+
+ // Store into an Id and return the l-value
+ void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // Load from an Id and return it
+ Id createLoad(Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // Create an OpAccessChain instruction
+ Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
+
+ // Create an OpArrayLength instruction
+ Id createArrayLength(Id base, unsigned int member);
+
+ // Create an OpCooperativeMatrixLengthNV instruction
+ Id createCooperativeMatrixLength(Id type);
+
+ // Create an OpCompositeExtract instruction
+ Id createCompositeExtract(Id composite, Id typeId, unsigned index);
+ Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
+ Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index);
+ Id createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes);
+
+ Id createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex);
+ Id createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex);
+
+ void createNoResultOp(Op);
+ void createNoResultOp(Op, Id operand);
+ void createNoResultOp(Op, const std::vector<Id>& operands);
+ void createNoResultOp(Op, const std::vector<IdImmediate>& operands);
+ void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
+ void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
+ Id createUnaryOp(Op, Id typeId, Id operand);
+ Id createBinOp(Op, Id typeId, Id operand1, Id operand2);
+ Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
+ Id createOp(Op, Id typeId, const std::vector<Id>& operands);
+ Id createOp(Op, Id typeId, const std::vector<IdImmediate>& operands);
+ Id createFunctionCall(spv::Function*, const std::vector<spv::Id>&);
+ Id createSpecConstantOp(Op, Id typeId, const std::vector<spv::Id>& operands, const std::vector<unsigned>& literals);
+
+ // Take an rvalue (source) and a set of channels to extract from it to
+ // make a new rvalue, which is returned.
+ Id createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels);
+
+ // Take a copy of an lvalue (target) and a source of components, and set the
+ // source components into the lvalue where the 'channels' say to put them.
+ // An updated version of the target is returned.
+ // (No true lvalue or stores are used.)
+ Id createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels);
+
+ // If both the id and precision are valid, the id
+ // gets tagged with the requested precision.
+ // The passed in id is always the returned id, to simplify use patterns.
+ Id setPrecision(Id id, Decoration precision)
+ {
+ if (precision != NoPrecision && id != NoResult)
+ addDecoration(id, precision);
+
+ return id;
+ }
+
+ // Can smear a scalar to a vector for the following forms:
+ // - promoteScalar(scalar, vector) // smear scalar to width of vector
+ // - promoteScalar(vector, scalar) // smear scalar to width of vector
+ // - promoteScalar(pointer, scalar) // smear scalar to width of what pointer points to
+ // - promoteScalar(scalar, scalar) // do nothing
+ // Other forms are not allowed.
+ //
+ // Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'.
+ // The type of the created vector is a vector of components of the same type as the scalar.
+ //
+ // Note: One of the arguments will change, with the result coming back that way rather than
+ // through the return value.
+ void promoteScalar(Decoration precision, Id& left, Id& right);
+
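+    // Illustrative sketch only (assumes 'f' is a float scalar id and 'v' a
+    // vec4 id already created by this builder):
+    //
+    //     Id left = f, right = v;
+    //     builder.promoteScalar(precision, left, right);
+    //     // 'left' now holds a 4-component vector smeared from f
+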
+ // Make a value by smearing the scalar to fill the type.
+ // vectorType should be the correct type for making a vector of scalarVal.
+ // (No conversions are done.)
+ Id smearScalar(Decoration precision, Id scalarVal, Id vectorType);
+
+ // Create a call to a built-in function.
+ Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args);
+
+ // List of parameters used to create a texture operation
+ struct TextureParameters {
+ Id sampler;
+ Id coords;
+ Id bias;
+ Id lod;
+ Id Dref;
+ Id offset;
+ Id offsets;
+ Id gradX;
+ Id gradY;
+ Id sample;
+ Id component;
+ Id texelOut;
+ Id lodClamp;
+ Id granularity;
+ Id coarse;
+ bool nonprivate;
+ bool volatil;
+ };
+
+ // Select the correct texture operation based on all inputs, and emit the correct instruction
+ Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicit, const TextureParameters&);
+
+ // Emit the OpTextureQuery* instruction that was passed in.
+ // Figure out the right return value and type, and return it.
+ Id createTextureQueryCall(Op, const TextureParameters&, bool isUnsignedResult);
+
+ Id createSamplePositionCall(Decoration precision, Id, Id);
+
+ Id createBitFieldExtractCall(Decoration precision, Id, Id, Id, bool isSigned);
+ Id createBitFieldInsertCall(Decoration precision, Id, Id, Id, Id);
+
+ // Reduction comparison for composites: For equal and not-equal resulting in a scalar.
+ Id createCompositeCompare(Decoration precision, Id, Id, bool /* true if for equal, false if for not-equal */);
+
+ // OpCompositeConstruct
+ Id createCompositeConstruct(Id typeId, const std::vector<Id>& constituents);
+
+ // vector or scalar constructor
+ Id createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId);
+
+ // matrix constructor
+ Id createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id constructee);
+
+ // Helper to use for building nested control flow with if-then-else.
+ class If {
+ public:
+ If(Id condition, unsigned int ctrl, Builder& builder);
+ ~If() {}
+
+ void makeBeginElse();
+ void makeEndIf();
+
+ private:
+ If(const If&);
+ If& operator=(If&);
+
+ Builder& builder;
+ Id condition;
+ unsigned int control;
+ Function* function;
+ Block* headerBlock;
+ Block* thenBlock;
+ Block* elseBlock;
+ Block* mergeBlock;
+ };
+
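+    // A minimal usage sketch (illustrative only; 'cond' is assumed to be a
+    // previously created bool-typed id):
+    //
+    //     spv::Builder::If ifBuilder(cond, spv::SelectionControlMaskNone, builder);
+    //     // ... emit then-side instructions at the current build point ...
+    //     ifBuilder.makeBeginElse();
+    //     // ... emit else-side instructions ...
+    //     ifBuilder.makeEndIf();   // the build point is now the merge block
+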
+    // Make a switch statement. A switch has 'numSegments' pieces of code, not containing
+    // any case/default labels, all separated by one or more case/default labels. Each
+    // case value caseValues[i] jumps to the segment valueToSegment[i]. The defaultSegment
+    // is also in this segment-number space. How to compute the value is given by
+    // 'condition', as in switch(condition).
+ //
+ // The SPIR-V Builder will maintain the stack of post-switch merge blocks for nested switches.
+ //
+ // Use a defaultSegment < 0 if there is no default segment (to branch to post switch).
+ //
+ // Returns the right set of basic blocks to start each code segment with, so that the caller's
+ // recursion stack can hold the memory for it.
+ //
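+    // (A usage sketch follows the endSwitch() declaration below.)
+    //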
+ void makeSwitch(Id condition, unsigned int control, int numSegments, const std::vector<int>& caseValues,
+ const std::vector<int>& valueToSegment, int defaultSegment, std::vector<Block*>& segmentBB); // return argument
+
+ // Add a branch to the innermost switch's merge block.
+ void addSwitchBreak();
+
+ // Move to the next code segment, passing in the return argument in makeSwitch()
+ void nextSwitchSegment(std::vector<Block*>& segmentBB, int segment);
+
+ // Finish off the innermost switch.
+ void endSwitch(std::vector<Block*>& segmentBB);
+
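+    // Hedged usage sketch of the whole sequence (values are illustrative;
+    // 'selector' is assumed to be an already-built integer-typed id):
+    //
+    //     std::vector<spv::Block*> segments;
+    //     builder.makeSwitch(selector, spv::SelectionControlMaskNone, 2,
+    //                        {1, 2} /*caseValues*/, {0, 1} /*valueToSegment*/,
+    //                        -1 /*no default segment*/, segments);
+    //     builder.nextSwitchSegment(segments, 0);
+    //     // ... code for case 1 ...
+    //     builder.addSwitchBreak();
+    //     builder.nextSwitchSegment(segments, 1);
+    //     // ... code for case 2 ...
+    //     builder.endSwitch(segments);
+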
+ struct LoopBlocks {
+ LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) :
+ head(head), body(body), merge(merge), continue_target(continue_target) { }
+ Block &head, &body, &merge, &continue_target;
+ private:
+ LoopBlocks();
+ LoopBlocks& operator=(const LoopBlocks&);
+ };
+
+ // Start a new loop and prepare the builder to generate code for it. Until
+ // closeLoop() is called for this loop, createLoopContinue() and
+ // createLoopExit() will target its corresponding blocks.
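+    // (A usage sketch follows closeLoop() below.)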
+ LoopBlocks& makeNewLoop();
+
+ // Create a new block in the function containing the build point. Memory is
+ // owned by the function object.
+ Block& makeNewBlock();
+
+ // Add a branch to the continue_target of the current (innermost) loop.
+ void createLoopContinue();
+
+ // Add an exit (e.g. "break") from the innermost loop that we're currently
+ // in.
+ void createLoopExit();
+
+ // Close the innermost loop that you're in
+ void closeLoop();
+
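+    // Hedged sketch of the expected call sequence (mirrors how a front end
+    // would drive these helpers; the condition test and the branch into the
+    // body are elided):
+    //
+    //     Builder::LoopBlocks& blocks = builder.makeNewLoop();
+    //     builder.createBranch(&blocks.head);
+    //     builder.setBuildPoint(&blocks.head);
+    //     builder.createLoopMerge(&blocks.merge, &blocks.continue_target,
+    //                             spv::LoopControlMaskNone, 0);
+    //     // ... branch to blocks.body, emit the body (createLoopContinue()
+    //     // and createLoopExit() may be used inside), end at blocks.merge ...
+    //     builder.closeLoop();
+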
+ //
+ // Access chain design for an R-Value vs. L-Value:
+ //
+    // There is a single access chain the builder is building at
+    // any particular time. Such a chain can be used to do either a load
+    // or a store, when desired.
+ //
+ // Expressions can be r-values, l-values, or both, or only r-values:
+ // a[b.c].d = .... // l-value
+ // ... = a[b.c].d; // r-value, that also looks like an l-value
+ // ++a[b.c].d; // r-value and l-value
+ // (x + y)[2]; // r-value only, can't possibly be l-value
+ //
+ // Computing an r-value means generating code. Hence,
+ // r-values should only be computed when they are needed, not speculatively.
+ //
+ // Computing an l-value means saving away information for later use in the compiler,
+ // no code is generated until the l-value is later dereferenced. It is okay
+ // to speculatively generate an l-value, just not okay to speculatively dereference it.
+ //
+ // The base of the access chain (the left-most variable or expression
+ // from which everything is based) can be set either as an l-value
+ // or as an r-value. Most efficient would be to set an l-value if one
+ // is available. If an expression was evaluated, the resulting r-value
+ // can be set as the chain base.
+ //
+ // The users of this single access chain can save and restore if they
+ // want to nest or manage multiple chains.
+ //
+
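+    // Minimal sketch of the intended pattern (names are illustrative:
+    // 'variableId' is an l-value pointer id, 'indexId' an integer id, and
+    // 'elementTypeId' the dereferenced type; NoPrecision stands in for
+    // "no nonUniform decoration"):
+    //
+    //     builder.clearAccessChain();
+    //     builder.setAccessChainLValue(variableId);                  // base: a
+    //     AccessChain::CoherentFlags flags;                          // default-clear
+    //     builder.accessChainPush(indexId, flags, /*alignment*/ 0);  // a[index]
+    //     Id loaded = builder.accessChainLoad(precision, NoPrecision, elementTypeId);
+    //     // ... or builder.accessChainStore(rvalue) to write through the same chain
+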
+ struct AccessChain {
+ Id base; // for l-values, pointer to the base object, for r-values, the base object
+ std::vector<Id> indexChain;
+ Id instr; // cache the instruction that generates this access chain
+ std::vector<unsigned> swizzle; // each std::vector element selects the next GLSL component number
+ Id component; // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
+ Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
+ bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value
+ unsigned int alignment; // bitwise OR of alignment values passed in. Accumulates worst alignment. Only tracks base and (optional) component selection alignment.
+
+ // Accumulate whether anything in the chain of structures has coherent decorations.
+ struct CoherentFlags {
+ unsigned coherent : 1;
+ unsigned devicecoherent : 1;
+ unsigned queuefamilycoherent : 1;
+ unsigned workgroupcoherent : 1;
+ unsigned subgroupcoherent : 1;
+ unsigned nonprivate : 1;
+ unsigned volatil : 1;
+ unsigned isImage : 1;
+
+ void clear() {
+ coherent = 0;
+ devicecoherent = 0;
+ queuefamilycoherent = 0;
+ workgroupcoherent = 0;
+ subgroupcoherent = 0;
+ nonprivate = 0;
+ volatil = 0;
+ isImage = 0;
+ }
+
+ CoherentFlags() { clear(); }
+ CoherentFlags operator |=(const CoherentFlags &other) {
+ coherent |= other.coherent;
+ devicecoherent |= other.devicecoherent;
+ queuefamilycoherent |= other.queuefamilycoherent;
+ workgroupcoherent |= other.workgroupcoherent;
+ subgroupcoherent |= other.subgroupcoherent;
+ nonprivate |= other.nonprivate;
+ volatil |= other.volatil;
+ isImage |= other.isImage;
+ return *this;
+ }
+ };
+ CoherentFlags coherentFlags;
+ };
+
+ //
+ // the SPIR-V builder maintains a single active chain that
+ // the following methods operate on
+ //
+
+ // for external save and restore
+ AccessChain getAccessChain() { return accessChain; }
+ void setAccessChain(AccessChain newChain) { accessChain = newChain; }
+
+ // clear accessChain
+ void clearAccessChain();
+
+ // set new base as an l-value base
+ void setAccessChainLValue(Id lValue)
+ {
+ assert(isPointer(lValue));
+ accessChain.base = lValue;
+ }
+
+ // set new base value as an r-value
+ void setAccessChainRValue(Id rValue)
+ {
+ accessChain.isRValue = true;
+ accessChain.base = rValue;
+ }
+
+ // push offset onto the end of the chain
+ void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+ {
+ accessChain.indexChain.push_back(offset);
+ accessChain.coherentFlags |= coherentFlags;
+ accessChain.alignment |= alignment;
+ }
+
+ // push new swizzle onto the end of any existing swizzle, merging into a single swizzle
+ void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment);
+
+ // push a dynamic component selection onto the access chain, only applicable with a
+ // non-trivial swizzle or no swizzle
+ void accessChainPushComponent(Id component, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+ {
+ if (accessChain.swizzle.size() != 1) {
+ accessChain.component = component;
+ if (accessChain.preSwizzleBaseType == NoType)
+ accessChain.preSwizzleBaseType = preSwizzleBaseType;
+ }
+ accessChain.coherentFlags |= coherentFlags;
+ accessChain.alignment |= alignment;
+ }
+
+ // use accessChain and swizzle to store value
+ void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // use accessChain and swizzle to load an r-value
+    Id accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // get the direct pointer for an l-value
+ Id accessChainGetLValue();
+
+ // Get the inferred SPIR-V type of the result of the current access chain,
+ // based on the type of the base and the chain of dereferences.
+ Id accessChainGetInferredType();
+
+ // Add capabilities, extensions, remove unneeded decorations, etc.,
+ // based on the resulting SPIR-V.
+ void postProcess();
+
+ // Hook to visit each instruction in a block in a function
+ void postProcess(Instruction&);
+ // Hook to visit each instruction in a reachable block in a function.
+ void postProcessReachable(const Instruction&);
+ // Hook to visit each non-32-bit sized float/int operation in a block.
+ void postProcessType(const Instruction&, spv::Id typeId);
+
+ void dump(std::vector<unsigned int>&) const;
+
+ void createBranch(Block* block);
+ void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
+ void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, unsigned int dependencyLength);
+
+ // Sets to generate opcode for specialization constants.
+ void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
+ // Sets to generate opcode for non-specialization constants (normal mode).
+ void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; }
+ // Check if the builder is generating code for spec constants.
+ bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; }
+
+ protected:
+ Id makeIntConstant(Id typeId, unsigned value, bool specConstant);
+ Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
+ Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value);
+ Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2);
+ Id findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps);
+ Id findStructConstant(Id typeId, const std::vector<Id>& comps);
+ Id collapseAccessChain();
+ void remapDynamicSwizzle();
+ void transferAccessChainSwizzle(bool dynamic);
+ void simplifyAccessChainSwizzle();
+ void createAndSetNoPredecessorBlock(const char*);
+ void createSelectionMerge(Block* mergeBlock, unsigned int control);
+ void dumpSourceInstructions(std::vector<unsigned int>&) const;
+ void dumpSourceInstructions(const spv::Id fileId, const std::string& text, std::vector<unsigned int>&) const;
+ void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
+ void dumpModuleProcesses(std::vector<unsigned int>&) const;
+ spv::MemoryAccessMask sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) const;
+
+ unsigned int spvVersion; // the version of SPIR-V to emit in the header
+ SourceLanguage source;
+ int sourceVersion;
+ spv::Id sourceFileStringId;
+ std::string sourceText;
+ int currentLine;
+ const char* currentFile;
+ bool emitOpLines;
+ std::set<std::string> extensions;
+ std::vector<const char*> sourceExtensions;
+ std::vector<const char*> moduleProcesses;
+ AddressingModel addressModel;
+ MemoryModel memoryModel;
+ std::set<spv::Capability> capabilities;
+ int builderNumber;
+ Module module;
+ Block* buildPoint;
+ Id uniqueId;
+ Function* entryPointFunction;
+ bool generatingOpCodeForSpecConst;
+ AccessChain accessChain;
+
+ // special blocks of instructions for output
+ std::vector<std::unique_ptr<Instruction> > strings;
+ std::vector<std::unique_ptr<Instruction> > imports;
+ std::vector<std::unique_ptr<Instruction> > entryPoints;
+ std::vector<std::unique_ptr<Instruction> > executionModes;
+ std::vector<std::unique_ptr<Instruction> > names;
+ std::vector<std::unique_ptr<Instruction> > decorations;
+ std::vector<std::unique_ptr<Instruction> > constantsTypesGlobals;
+ std::vector<std::unique_ptr<Instruction> > externals;
+ std::vector<std::unique_ptr<Function> > functions;
+
+ // not output, internally used for quick & dirty canonical (unique) creation
+ std::unordered_map<unsigned int, std::vector<Instruction*>> groupedConstants; // map type opcodes to constant inst.
+ std::unordered_map<unsigned int, std::vector<Instruction*>> groupedStructConstants; // map struct-id to constant instructions
+ std::unordered_map<unsigned int, std::vector<Instruction*>> groupedTypes; // map type opcodes to type instructions
+
+ // stack of switches
+ std::stack<Block*> switchMerges;
+
+ // Our loop stack.
+ std::stack<LoopBlocks> loops;
+
+ // map from strings to their string ids
+ std::unordered_map<std::string, spv::Id> stringIds;
+
+ // map from include file name ids to their contents
+ std::map<spv::Id, const std::string*> includeFiles;
+
+    // The logger for outputting warnings and errors.
+ SpvBuildLogger* logger;
+}; // end Builder class
+
+}; // end spv namespace
+
+#endif // SpvBuilder_H
diff --git a/src/3rdparty/glslang/SPIRV/SpvPostProcess.cpp b/src/3rdparty/glslang/SPIRV/SpvPostProcess.cpp
new file mode 100644
index 0000000..80471ca
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SpvPostProcess.cpp
@@ -0,0 +1,389 @@
+//
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Post-processing for SPIR-V IR, in internal form, not standard binary form.
+//
+
+#include <cassert>
+#include <cstdlib>
+
+#include <unordered_set>
+#include <algorithm>
+
+#include "SpvBuilder.h"
+
+#include "spirv.hpp"
+#include "GlslangToSpv.h"
+#include "SpvBuilder.h"
+namespace spv {
+ #include "GLSL.std.450.h"
+ #include "GLSL.ext.KHR.h"
+ #include "GLSL.ext.EXT.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+#ifdef NV_EXTENSIONS
+ #include "GLSL.ext.NV.h"
+#endif
+}
+
+namespace spv {
+
+// Hook to visit each operand type and result type of an instruction.
+// Will be called multiple times for one instruction, once for each typed
+// operand and the result.
+void Builder::postProcessType(const Instruction& inst, Id typeId)
+{
+ // Characterize the type being questioned
+ Id basicTypeOp = getMostBasicTypeClass(typeId);
+ int width = 0;
+ if (basicTypeOp == OpTypeFloat || basicTypeOp == OpTypeInt)
+ width = getScalarTypeWidth(typeId);
+
+ // Do opcode-specific checks
+ switch (inst.getOpCode()) {
+ case OpLoad:
+ case OpStore:
+ if (basicTypeOp == OpTypeStruct) {
+ if (containsType(typeId, OpTypeInt, 8))
+ addCapability(CapabilityInt8);
+ if (containsType(typeId, OpTypeInt, 16))
+ addCapability(CapabilityInt16);
+ if (containsType(typeId, OpTypeFloat, 16))
+ addCapability(CapabilityFloat16);
+ } else {
+ StorageClass storageClass = getStorageClass(inst.getIdOperand(0));
+ if (width == 8) {
+ switch (storageClass) {
+ case StorageClassPhysicalStorageBufferEXT:
+ case StorageClassUniform:
+ case StorageClassStorageBuffer:
+ case StorageClassPushConstant:
+ break;
+ default:
+ addCapability(CapabilityInt8);
+ break;
+ }
+ } else if (width == 16) {
+ switch (storageClass) {
+ case StorageClassPhysicalStorageBufferEXT:
+ case StorageClassUniform:
+ case StorageClassStorageBuffer:
+ case StorageClassPushConstant:
+ case StorageClassInput:
+ case StorageClassOutput:
+ break;
+ default:
+ if (basicTypeOp == OpTypeInt)
+ addCapability(CapabilityInt16);
+ if (basicTypeOp == OpTypeFloat)
+ addCapability(CapabilityFloat16);
+ break;
+ }
+ }
+ }
+ break;
+ case OpAccessChain:
+ case OpPtrAccessChain:
+ case OpCopyObject:
+ case OpFConvert:
+ case OpSConvert:
+ case OpUConvert:
+ break;
+ case OpExtInst:
+#ifdef AMD_EXTENSIONS
+ switch (inst.getImmediateOperand(1)) {
+ case GLSLstd450Frexp:
+ case GLSLstd450FrexpStruct:
+ if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeInt, 16))
+ addExtension(spv::E_SPV_AMD_gpu_shader_int16);
+ break;
+ case GLSLstd450InterpolateAtCentroid:
+ case GLSLstd450InterpolateAtSample:
+ case GLSLstd450InterpolateAtOffset:
+ if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeFloat, 16))
+ addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+ break;
+ default:
+ break;
+ }
+#endif
+ break;
+ default:
+ if (basicTypeOp == OpTypeFloat && width == 16)
+ addCapability(CapabilityFloat16);
+ if (basicTypeOp == OpTypeInt && width == 16)
+ addCapability(CapabilityInt16);
+ if (basicTypeOp == OpTypeInt && width == 8)
+ addCapability(CapabilityInt8);
+ break;
+ }
+}
+
+// Called for each instruction that resides in a block.
+void Builder::postProcess(Instruction& inst)
+{
+ // Add capabilities based simply on the opcode.
+ switch (inst.getOpCode()) {
+ case OpExtInst:
+ switch (inst.getImmediateOperand(1)) {
+ case GLSLstd450InterpolateAtCentroid:
+ case GLSLstd450InterpolateAtSample:
+ case GLSLstd450InterpolateAtOffset:
+ addCapability(CapabilityInterpolationFunction);
+ break;
+ default:
+ break;
+ }
+ break;
+ case OpDPdxFine:
+ case OpDPdyFine:
+ case OpFwidthFine:
+ case OpDPdxCoarse:
+ case OpDPdyCoarse:
+ case OpFwidthCoarse:
+ addCapability(CapabilityDerivativeControl);
+ break;
+
+ case OpImageQueryLod:
+ case OpImageQuerySize:
+ case OpImageQuerySizeLod:
+ case OpImageQuerySamples:
+ case OpImageQueryLevels:
+ addCapability(CapabilityImageQuery);
+ break;
+
+#ifdef NV_EXTENSIONS
+ case OpGroupNonUniformPartitionNV:
+ addExtension(E_SPV_NV_shader_subgroup_partitioned);
+ addCapability(CapabilityGroupNonUniformPartitionedNV);
+ break;
+#endif
+
+ case OpLoad:
+ case OpStore:
+ {
+        // For any load/store to a PhysicalStorageBufferEXT, walk the access chain's
+        // index list to compute the misalignment. The pre-existing alignment value
+        // (set via Builder::AccessChain::alignment) only accounts for the base of
+        // the reference type and any scalar component selection in the access chain,
+        // and this function computes the rest from the SPIR-V Offset decorations.
+ Instruction *accessChain = module.getInstruction(inst.getIdOperand(0));
+ if (accessChain->getOpCode() == OpAccessChain) {
+ Instruction *base = module.getInstruction(accessChain->getIdOperand(0));
+ // Get the type of the base of the access chain. It must be a pointer type.
+ Id typeId = base->getTypeId();
+ Instruction *type = module.getInstruction(typeId);
+ assert(type->getOpCode() == OpTypePointer);
+ if (type->getImmediateOperand(0) != StorageClassPhysicalStorageBufferEXT) {
+ break;
+ }
+ // Get the pointee type.
+ typeId = type->getIdOperand(1);
+ type = module.getInstruction(typeId);
+ // Walk the index list for the access chain. For each index, find any
+ // misalignment that can apply when accessing the member/element via
+ // Offset/ArrayStride/MatrixStride decorations, and bitwise OR them all
+ // together.
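+            // For example (hypothetical layout): an index into an array with
+            // ArrayStride 24 followed by a member with Offset 20 contributes
+            // 24 | 20 = 28 (0b11100); only the lowest set bit of the merged
+            // value (computed below) is a guaranteed alignment.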
+ int alignment = 0;
+ for (int i = 1; i < accessChain->getNumOperands(); ++i) {
+ Instruction *idx = module.getInstruction(accessChain->getIdOperand(i));
+ if (type->getOpCode() == OpTypeStruct) {
+ assert(idx->getOpCode() == OpConstant);
+ unsigned int c = idx->getImmediateOperand(0);
+
+ const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
+ if (decoration.get()->getOpCode() == OpMemberDecorate &&
+ decoration.get()->getIdOperand(0) == typeId &&
+ decoration.get()->getImmediateOperand(1) == c &&
+ (decoration.get()->getImmediateOperand(2) == DecorationOffset ||
+ decoration.get()->getImmediateOperand(2) == DecorationMatrixStride)) {
+ alignment |= decoration.get()->getImmediateOperand(3);
+ }
+ };
+ std::for_each(decorations.begin(), decorations.end(), function);
+ // get the next member type
+ typeId = type->getIdOperand(c);
+ type = module.getInstruction(typeId);
+ } else if (type->getOpCode() == OpTypeArray ||
+ type->getOpCode() == OpTypeRuntimeArray) {
+ const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
+ if (decoration.get()->getOpCode() == OpDecorate &&
+ decoration.get()->getIdOperand(0) == typeId &&
+ decoration.get()->getImmediateOperand(1) == DecorationArrayStride) {
+ alignment |= decoration.get()->getImmediateOperand(2);
+ }
+ };
+ std::for_each(decorations.begin(), decorations.end(), function);
+ // Get the element type
+ typeId = type->getIdOperand(0);
+ type = module.getInstruction(typeId);
+ } else {
+ // Once we get to any non-aggregate type, we're done.
+ break;
+ }
+ }
+ assert(inst.getNumOperands() >= 3);
+ unsigned int memoryAccess = inst.getImmediateOperand((inst.getOpCode() == OpStore) ? 2 : 1);
+ assert(memoryAccess & MemoryAccessAlignedMask);
+ static_cast<void>(memoryAccess);
+ // Compute the index of the alignment operand.
+ int alignmentIdx = 2;
+ if (inst.getOpCode() == OpStore)
+ alignmentIdx++;
+ // Merge new and old (mis)alignment
+ alignment |= inst.getImmediateOperand(alignmentIdx);
+ // Pick the LSB
+ alignment = alignment & ~(alignment & (alignment-1));
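+            // x & ~(x & (x - 1)) isolates the lowest set bit: e.g. 28 (0b11100)
+            // yields 4, the largest power of two that divides every merged
+            // offset and stride.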
+ // update the Aligned operand
+ inst.setImmediateOperand(alignmentIdx, alignment);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // Checks based on type
+ if (inst.getTypeId() != NoType)
+ postProcessType(inst, inst.getTypeId());
+ for (int op = 0; op < inst.getNumOperands(); ++op) {
+ if (inst.isIdOperand(op)) {
+ // In blocks, these are always result ids, but we are relying on
+ // getTypeId() to return NoType for things like OpLabel.
+ if (getTypeId(inst.getIdOperand(op)) != NoType)
+ postProcessType(inst, getTypeId(inst.getIdOperand(op)));
+ }
+ }
+}
+
+// Called for each instruction in a reachable block.
+void Builder::postProcessReachable(const Instruction&)
+{
+    // Previously had code here, but it was questionable to do so without also deleting the unreachable instructions.
+}
+
+// comment in header
+void Builder::postProcess()
+{
+ std::unordered_set<const Block*> reachableBlocks;
+ std::unordered_set<Id> unreachableDefinitions;
+ // Collect IDs defined in unreachable blocks. For each function, label the
+ // reachable blocks first. Then for each unreachable block, collect the
+ // result IDs of the instructions in it.
+ for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
+ Function* f = *fi;
+ Block* entry = f->getEntryBlock();
+ inReadableOrder(entry, [&reachableBlocks](const Block* b) { reachableBlocks.insert(b); });
+ for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
+ Block* b = *bi;
+ if (reachableBlocks.count(b) == 0) {
+ for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
+ unreachableDefinitions.insert(ii->get()->getResultId());
+ }
+ }
+ }
+
+ // Remove unneeded decorations, for unreachable instructions
+ decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
+ [&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
+ Id decoration_id = I.get()->getIdOperand(0);
+ return unreachableDefinitions.count(decoration_id) != 0;
+ }),
+ decorations.end());
+
+    // Add per-instruction capabilities, extensions, etc.
+
+ // process all reachable instructions...
+ for (auto bi = reachableBlocks.cbegin(); bi != reachableBlocks.cend(); ++bi) {
+ const Block* block = *bi;
+ const auto function = [this](const std::unique_ptr<Instruction>& inst) { postProcessReachable(*inst.get()); };
+ std::for_each(block->getInstructions().begin(), block->getInstructions().end(), function);
+ }
+
+ // process all block-contained instructions
+ for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
+ Function* f = *fi;
+ for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
+ Block* b = *bi;
+ for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
+ postProcess(*ii->get());
+
+ // For all local variables that contain pointers to PhysicalStorageBufferEXT, check whether
+ // there is an existing restrict/aliased decoration. If we don't find one, add Aliased as the
+ // default.
+ for (auto vi = b->getLocalVariables().cbegin(); vi != b->getLocalVariables().cend(); vi++) {
+ const Instruction& inst = *vi->get();
+ Id resultId = inst.getResultId();
+ if (containsPhysicalStorageBufferOrArray(getDerefTypeId(resultId))) {
+ bool foundDecoration = false;
+ const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
+ if (decoration.get()->getIdOperand(0) == resultId &&
+ decoration.get()->getOpCode() == OpDecorate &&
+ (decoration.get()->getImmediateOperand(1) == spv::DecorationAliasedPointerEXT ||
+ decoration.get()->getImmediateOperand(1) == spv::DecorationRestrictPointerEXT)) {
+ foundDecoration = true;
+ }
+ };
+ std::for_each(decorations.begin(), decorations.end(), function);
+ if (!foundDecoration) {
+ addDecoration(resultId, spv::DecorationAliasedPointerEXT);
+ }
+ }
+ }
+ }
+ }
+
+ // Look for any 8/16 bit type in physical storage buffer class, and set the
+ // appropriate capability. This happens in createSpvVariable for other storage
+ // classes, but there isn't always a variable for physical storage buffer.
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ Instruction* type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
+ if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
+ addExtension(spv::E_SPV_KHR_8bit_storage);
+ addCapability(spv::CapabilityStorageBuffer8BitAccess);
+ }
+ if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
+ containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
+ addExtension(spv::E_SPV_KHR_16bit_storage);
+ addCapability(spv::CapabilityStorageBuffer16BitAccess);
+ }
+ }
+ }
+}
+
+}; // end spv namespace
diff --git a/src/3rdparty/glslang/SPIRV/SpvTools.cpp b/src/3rdparty/glslang/SPIRV/SpvTools.cpp
new file mode 100644
index 0000000..cce5fa7
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SpvTools.cpp
@@ -0,0 +1,201 @@
+//
+// Copyright (C) 2014-2016 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Call into SPIRV-Tools to disassemble, validate, and optimize.
+//
+
+#if ENABLE_OPT
+
+#include <cstdio>
+#include <iostream>
+
+#include "SpvTools.h"
+#include "spirv-tools/optimizer.hpp"
+#include "spirv-tools/libspirv.h"
+
+namespace glslang {
+
+// Translate glslang's view of target versioning to what SPIRV-Tools uses.
+spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger)
+{
+ switch (spvVersion.vulkan) {
+ case glslang::EShTargetVulkan_1_0: return spv_target_env::SPV_ENV_VULKAN_1_0;
+ case glslang::EShTargetVulkan_1_1: return spv_target_env::SPV_ENV_VULKAN_1_1;
+ default:
+ break;
+ }
+
+ if (spvVersion.openGl > 0)
+ return spv_target_env::SPV_ENV_OPENGL_4_5;
+
+ logger->missingFunctionality("Target version for SPIRV-Tools validator");
+ return spv_target_env::SPV_ENV_UNIVERSAL_1_0;
+}
+
+// Use the SPIRV-Tools disassembler to print SPIR-V.
+void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv)
+{
+ // disassemble
+ spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+ spv_text text;
+ spv_diagnostic diagnostic = nullptr;
+ spvBinaryToText(context, spirv.data(), spirv.size(),
+ SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT,
+ &text, &diagnostic);
+
+ // dump
+ if (diagnostic == nullptr)
+ out << text->str;
+ else
+ spvDiagnosticPrint(diagnostic);
+
+ // teardown
+ spvDiagnosticDestroy(diagnostic);
+ spvContextDestroy(context);
+}
+
+// Apply the SPIRV-Tools validator to generated SPIR-V.
+void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger* logger)
+{
+ // validate
+ spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger));
+ spv_const_binary_t binary = { spirv.data(), spirv.size() };
+ spv_diagnostic diagnostic = nullptr;
+ spv_validator_options options = spvValidatorOptionsCreate();
+ spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets());
+ spvValidateWithOptions(context, options, &binary, &diagnostic);
+
+ // report
+ if (diagnostic != nullptr) {
+ logger->error("SPIRV-Tools Validation Errors");
+ logger->error(diagnostic->error);
+ }
+
+ // tear down
+ spvValidatorOptionsDestroy(options);
+ spvDiagnosticDestroy(diagnostic);
+ spvContextDestroy(context);
+}
+
+// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
+// legalizing HLSL SPIR-V.
+void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger*, const SpvOptions* options)
+{
+ spv_target_env target_env = SPV_ENV_UNIVERSAL_1_2;
+
+ spvtools::Optimizer optimizer(target_env);
+ optimizer.SetMessageConsumer(
+ [](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) {
+ auto &out = std::cerr;
+ switch (level)
+ {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ out << "error: ";
+ break;
+ case SPV_MSG_WARNING:
+ out << "warning: ";
+ break;
+ case SPV_MSG_INFO:
+ case SPV_MSG_DEBUG:
+ out << "info: ";
+ break;
+ default:
+ break;
+ }
+ if (source)
+ {
+ out << source << ":";
+ }
+ out << position.line << ":" << position.column << ":" << position.index << ":";
+ if (message)
+ {
+ out << " " << message;
+ }
+ out << std::endl;
+ });
+
+ // If debug (specifically source line info) is being generated, propagate
+ // line information into all SPIR-V instructions. This avoids loss of
+ // information when instructions are deleted or moved. Later, remove
+    // redundant information to minimize final SPIR-V size.
+ if (options->generateDebugInfo) {
+ optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
+ }
+ optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
+ optimizer.RegisterPass(spvtools::CreateMergeReturnPass());
+ optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass());
+ optimizer.RegisterPass(spvtools::CreateEliminateDeadFunctionsPass());
+ optimizer.RegisterPass(spvtools::CreateScalarReplacementPass());
+ optimizer.RegisterPass(spvtools::CreateLocalAccessChainConvertPass());
+ optimizer.RegisterPass(spvtools::CreateLocalSingleBlockLoadStoreElimPass());
+ optimizer.RegisterPass(spvtools::CreateLocalSingleStoreElimPass());
+ optimizer.RegisterPass(spvtools::CreateSimplificationPass());
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateVectorDCEPass());
+ optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
+ optimizer.RegisterPass(spvtools::CreateBlockMergePass());
+ optimizer.RegisterPass(spvtools::CreateLocalMultiStoreElimPass());
+ optimizer.RegisterPass(spvtools::CreateIfConversionPass());
+ optimizer.RegisterPass(spvtools::CreateSimplificationPass());
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateVectorDCEPass());
+ optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
+ if (options->optimizeSize) {
+ optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass());
+ // TODO(greg-lunarg): Add this when AMD driver issues are resolved
+ // optimizer.RegisterPass(CreateCommonUniformElimPass());
+ }
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
+ if (options->generateDebugInfo) {
+ optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass());
+ }
+
+ spvtools::OptimizerOptions spvOptOptions;
+    spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on
+ optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions);
+}
+
+}; // end namespace glslang
+
+#endif
diff --git a/src/3rdparty/glslang/SPIRV/SpvTools.h b/src/3rdparty/glslang/SPIRV/SpvTools.h
new file mode 100644
index 0000000..7e49ae0
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/SpvTools.h
@@ -0,0 +1,80 @@
+//
+// Copyright (C) 2014-2016 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Call into SPIRV-Tools to disassemble, validate, and optimize.
+//
+
+#pragma once
+#ifndef GLSLANG_SPV_TOOLS_H
+#define GLSLANG_SPV_TOOLS_H
+
+#include <vector>
+#include <ostream>
+
+#include "../glslang/MachineIndependent/localintermediate.h"
+#include "Logger.h"
+
+namespace glslang {
+
+struct SpvOptions {
+ SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
+ optimizeSize(false), disassemble(false), validate(false) { }
+ bool generateDebugInfo;
+ bool disableOptimizer;
+ bool optimizeSize;
+ bool disassemble;
+ bool validate;
+};
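+
+// A typical use (a sketch; 'program', 'stage', and 'spirv' are caller-side
+// names; see GlslangToSpv.h for the exact overloads) fills in an SpvOptions
+// and passes it to GlslangToSpv() together with a logger:
+//
+//     glslang::SpvOptions options;
+//     options.generateDebugInfo = true;
+//     options.validate = true;
+//     spv::SpvBuildLogger logger;
+//     glslang::GlslangToSpv(*program.getIntermediate(stage), spirv, &logger, &options);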
+
+#if ENABLE_OPT
+
+// Use the SPIRV-Tools disassembler to print SPIR-V.
+void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
+
+// Apply the SPIRV-Tools validator to generated SPIR-V.
+void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger*);
+
+// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
+// legalizing HLSL SPIR-V.
+void SpirvToolsLegalize(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger*, const SpvOptions*);
+
+#endif
+
+} // end namespace glslang
+
+#endif // GLSLANG_SPV_TOOLS_H
diff --git a/src/3rdparty/glslang/SPIRV/bitutils.h b/src/3rdparty/glslang/SPIRV/bitutils.h
new file mode 100644
index 0000000..22e44ce
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/bitutils.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2015-2016 The Khronos Group Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_UTIL_BITUTILS_H_
+#define LIBSPIRV_UTIL_BITUTILS_H_
+
+#include <cstdint>
+#include <cstring>
+
+namespace spvutils {
+
+// Performs a bitwise copy of source to the destination type Dest.
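+// For example, BitwiseCast<uint32_t>(1.0f) yields 0x3f800000, the IEEE-754
+// bit pattern of 1.0f.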
+template <typename Dest, typename Src>
+Dest BitwiseCast(Src source) {
+ Dest dest;
+ static_assert(sizeof(source) == sizeof(dest),
+ "BitwiseCast: Source and destination must have the same size");
+ std::memcpy(static_cast<void*>(&dest), &source, sizeof(dest));
+ return dest;
+}
+
+// SetBits<T, First, Num> returns an integer of type <T> with bits set
+// for position <First> through <First + Num - 1>, counting from the least
+// significant bit. In particular when Num == 0, no positions are set to 1.
+// A static assert will be triggered if First + Num > sizeof(T) * 8, that is,
+// a bit that will not fit in the underlying type is set.
+template <typename T, size_t First = 0, size_t Num = 0>
+struct SetBits {
+ static_assert(First < sizeof(T) * 8,
+ "Tried to set a bit that is shifted too far.");
+ const static T get = (T(1) << First) | SetBits<T, First + 1, Num - 1>::get;
+};
+
+template <typename T, size_t Last>
+struct SetBits<T, Last, 0> {
+ const static T get = T(0);
+};
+
+// This is all compile-time so we can put our tests right here.
+static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000),
+ "SetBits failed");
+
+static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL),
+ "SetBits failed");
+
+} // namespace spvutils
+
+#endif // LIBSPIRV_UTIL_BITUTILS_H_
diff --git a/src/3rdparty/glslang/SPIRV/disassemble.cpp b/src/3rdparty/glslang/SPIRV/disassemble.cpp
new file mode 100644
index 0000000..631173c
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/disassemble.cpp
@@ -0,0 +1,759 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Disassembler for SPIR-V.
+//
+
+#include <cstdlib>
+#include <cstring>
+#include <cassert>
+#include <iomanip>
+#include <stack>
+#include <sstream>
+#include <cstring>
+
+#include "disassemble.h"
+#include "doc.h"
+#include "SpvTools.h"
+
+namespace spv {
+ extern "C" {
+ // Include C-based headers that don't have a namespace
+ #include "GLSL.std.450.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+
+#ifdef NV_EXTENSIONS
+ #include "GLSL.ext.NV.h"
+#endif
+ }
+}
+const char* GlslStd450DebugNames[spv::GLSLstd450Count];
+
+namespace spv {
+
+#ifdef AMD_EXTENSIONS
+static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
+#endif
+
+#ifdef NV_EXTENSIONS
+static const char* GLSLextNVGetDebugNames(const char*, unsigned);
+#endif
+
+static void Kill(std::ostream& out, const char* message)
+{
+ out << std::endl << "Disassembly failed: " << message << std::endl;
+ exit(1);
+}
+
+// used to identify the extended instruction library imported when printing
+enum ExtInstSet {
+ GLSL450Inst,
+
+#ifdef AMD_EXTENSIONS
+ GLSLextAMDInst,
+#endif
+
+#ifdef NV_EXTENSIONS
+ GLSLextNVInst,
+#endif
+
+ OpenCLExtInst,
+};
+
+// Container class for a single instance of a SPIR-V stream, with methods for disassembly.
+class SpirvStream {
+public:
+ SpirvStream(std::ostream& out, const std::vector<unsigned int>& stream) : out(out), stream(stream), word(0), nextNestedControl(0) { }
+ virtual ~SpirvStream() { }
+
+ void validate();
+ void processInstructions();
+
+protected:
+ SpirvStream(const SpirvStream&);
+ SpirvStream& operator=(const SpirvStream&);
+ Op getOpCode(int id) const { return idInstruction[id] ? (Op)(stream[idInstruction[id]] & OpCodeMask) : OpNop; }
+
+ // Output methods
+ void outputIndent();
+ void formatId(Id id, std::stringstream&);
+ void outputResultId(Id id);
+ void outputTypeId(Id id);
+ void outputId(Id id);
+ void outputMask(OperandClass operandClass, unsigned mask);
+ void disassembleImmediates(int numOperands);
+ void disassembleIds(int numOperands);
+ int disassembleString();
+ void disassembleInstruction(Id resultId, Id typeId, Op opCode, int numOperands);
+
+ // Data
+ std::ostream& out; // where to write the disassembly
+ const std::vector<unsigned int>& stream; // the actual word stream
+ int size; // the size of the word stream
+ int word; // the next word of the stream to read
+
+ // map each <id> to the instruction that created it
+ Id bound;
+ std::vector<unsigned int> idInstruction; // the word offset into the stream where the instruction for result [id] starts; 0 if not yet seen (forward reference or function parameter)
+
+ std::vector<std::string> idDescriptor; // the best text string known for explaining the <id>
+
+ // schema
+ unsigned int schema;
+
+ // stack of structured-merge points
+ std::stack<Id> nestedControl;
+ Id nextNestedControl; // need a slight delay for when we are nested
+};
+
+void SpirvStream::validate()
+{
+ size = (int)stream.size();
+ if (size < 4)
+ Kill(out, "stream is too short");
+
+ // Magic number
+ if (stream[word++] != MagicNumber) {
+ out << "Bad magic number";
+ return;
+ }
+
+ // Version
+ out << "// Module Version " << std::hex << stream[word++] << std::endl;
+
+ // Generator's magic number
+ out << "// Generated by (magic number): " << std::hex << stream[word++] << std::dec << std::endl;
+
+ // Result <id> bound
+ bound = stream[word++];
+ idInstruction.resize(bound);
+ idDescriptor.resize(bound);
+ out << "// Id's are bound by " << bound << std::endl;
+ out << std::endl;
+
+ // Reserved schema, must be 0 for now
+ schema = stream[word++];
+ if (schema != 0)
+ Kill(out, "bad schema, must be 0");
+}
+
+// Loop over all the instructions, in order, processing each.
+// Boilerplate for each is handled here directly; the rest is dispatched.
+void SpirvStream::processInstructions()
+{
+ // Instructions
+ while (word < size) {
+ int instructionStart = word;
+
+ // Instruction wordCount and opcode
+ unsigned int firstWord = stream[word];
+ unsigned wordCount = firstWord >> WordCountShift;
+ Op opCode = (Op)(firstWord & OpCodeMask);
+ int nextInst = word + wordCount;
+ ++word;
+
+ // Presence of full instruction
+ if (nextInst > size)
+ Kill(out, "stream instruction terminated too early");
+
+ // Base for computing number of operands; will be updated as more is learned
+ unsigned numOperands = wordCount - 1;
+
+ // Type <id>
+ Id typeId = 0;
+ if (InstructionDesc[opCode].hasType()) {
+ typeId = stream[word++];
+ --numOperands;
+ }
+
+ // Result <id>
+ Id resultId = 0;
+ if (InstructionDesc[opCode].hasResult()) {
+ resultId = stream[word++];
+ --numOperands;
+
+ // save instruction for future reference
+ idInstruction[resultId] = instructionStart;
+ }
+
+ outputResultId(resultId);
+ outputTypeId(typeId);
+ outputIndent();
+
+ // Hand off the Op and all its operands
+ disassembleInstruction(resultId, typeId, opCode, numOperands);
+ if (word != nextInst) {
+ out << " ERROR, incorrect number of operands consumed. At " << word << " instead of " << nextInst << " instruction start was " << instructionStart;
+ word = nextInst;
+ }
+ out << std::endl;
+ }
+}
+
+void SpirvStream::outputIndent()
+{
+ for (int i = 0; i < (int)nestedControl.size(); ++i)
+ out << " ";
+}
+
+void SpirvStream::formatId(Id id, std::stringstream& idStream)
+{
+ if (id != 0) {
+ // On instructions with no IDs, this is called with "0", which does not
+ // have to be within ID bounds on null shaders.
+ if (id >= bound)
+ Kill(out, "Bad <id>");
+
+ idStream << id;
+ if (idDescriptor[id].size() > 0)
+ idStream << "(" << idDescriptor[id] << ")";
+ }
+}
+
+void SpirvStream::outputResultId(Id id)
+{
+ const int width = 16;
+ std::stringstream idStream;
+ formatId(id, idStream);
+ out << std::setw(width) << std::right << idStream.str();
+ if (id != 0)
+ out << ":";
+ else
+ out << " ";
+
+ if (nestedControl.size() && id == nestedControl.top())
+ nestedControl.pop();
+}
+
+void SpirvStream::outputTypeId(Id id)
+{
+ const int width = 12;
+ std::stringstream idStream;
+ formatId(id, idStream);
+ out << std::setw(width) << std::right << idStream.str() << " ";
+}
+
+void SpirvStream::outputId(Id id)
+{
+ if (id >= bound)
+ Kill(out, "Bad <id>");
+
+ out << id;
+ if (idDescriptor[id].size() > 0)
+ out << "(" << idDescriptor[id] << ")";
+}
+
+void SpirvStream::outputMask(OperandClass operandClass, unsigned mask)
+{
+ if (mask == 0)
+ out << "None";
+ else {
+ for (int m = 0; m < OperandClassParams[operandClass].ceiling; ++m) {
+ if (mask & (1 << m))
+ out << OperandClassParams[operandClass].getName(m) << " ";
+ }
+ }
+}
+
+void SpirvStream::disassembleImmediates(int numOperands)
+{
+ for (int i = 0; i < numOperands; ++i) {
+ out << stream[word++];
+ if (i < numOperands - 1)
+ out << " ";
+ }
+}
+
+void SpirvStream::disassembleIds(int numOperands)
+{
+ for (int i = 0; i < numOperands; ++i) {
+ outputId(stream[word++]);
+ if (i < numOperands - 1)
+ out << " ";
+ }
+}
+
+// return the number of operands consumed by the string
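+// (e.g. "main" spans two words: four characters plus a terminating NUL that
+// pads the second word, so 'word' advances by 2 and 2 is returned)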
+int SpirvStream::disassembleString()
+{
+ int startWord = word;
+
+ out << " \"";
+
+ const char* wordString;
+ bool done = false;
+ do {
+ unsigned int content = stream[word];
+ wordString = (const char*)&content;
+ for (int charCount = 0; charCount < 4; ++charCount) {
+ if (*wordString == 0) {
+ done = true;
+ break;
+ }
+ out << *(wordString++);
+ }
+ ++word;
+ } while (! done);
+
+ out << "\"";
+
+ return word - startWord;
+}
+
+void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode, int numOperands)
+{
+ // Process the opcode
+
+ out << (OpcodeString(opCode) + 2); // leave out the "Op"
+
+ if (opCode == OpLoopMerge || opCode == OpSelectionMerge)
+ nextNestedControl = stream[word];
+ else if (opCode == OpBranchConditional || opCode == OpSwitch) {
+ if (nextNestedControl) {
+ nestedControl.push(nextNestedControl);
+ nextNestedControl = 0;
+ }
+ } else if (opCode == OpExtInstImport) {
+ idDescriptor[resultId] = (const char*)(&stream[word]);
+ }
+ else {
+ if (resultId != 0 && idDescriptor[resultId].size() == 0) {
+ switch (opCode) {
+ case OpTypeInt:
+ switch (stream[word]) {
+ case 8: idDescriptor[resultId] = "int8_t"; break;
+ case 16: idDescriptor[resultId] = "int16_t"; break;
+ default: assert(0); // fallthrough
+ case 32: idDescriptor[resultId] = "int"; break;
+ case 64: idDescriptor[resultId] = "int64_t"; break;
+ }
+ break;
+ case OpTypeFloat:
+ switch (stream[word]) {
+ case 16: idDescriptor[resultId] = "float16_t"; break;
+ default: assert(0); // fallthrough
+ case 32: idDescriptor[resultId] = "float"; break;
+ case 64: idDescriptor[resultId] = "float64_t"; break;
+ }
+ break;
+ case OpTypeBool:
+ idDescriptor[resultId] = "bool";
+ break;
+ case OpTypeStruct:
+ idDescriptor[resultId] = "struct";
+ break;
+ case OpTypePointer:
+ idDescriptor[resultId] = "ptr";
+ break;
+ case OpTypeVector:
+ if (idDescriptor[stream[word]].size() > 0) {
+ idDescriptor[resultId].append(idDescriptor[stream[word]].begin(), idDescriptor[stream[word]].begin() + 1);
+ if (strstr(idDescriptor[stream[word]].c_str(), "8")) {
+ idDescriptor[resultId].append("8");
+ }
+ if (strstr(idDescriptor[stream[word]].c_str(), "16")) {
+ idDescriptor[resultId].append("16");
+ }
+ if (strstr(idDescriptor[stream[word]].c_str(), "64")) {
+ idDescriptor[resultId].append("64");
+ }
+ }
+ idDescriptor[resultId].append("vec");
+ switch (stream[word + 1]) {
+ case 2: idDescriptor[resultId].append("2"); break;
+ case 3: idDescriptor[resultId].append("3"); break;
+ case 4: idDescriptor[resultId].append("4"); break;
+ case 8: idDescriptor[resultId].append("8"); break;
+ case 16: idDescriptor[resultId].append("16"); break;
+ case 32: idDescriptor[resultId].append("32"); break;
+ default: break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Process the operands. Note, a new context-dependent set could be
+ // swapped in mid-traversal.
+
+    // Handle images specially, so we can print helpful strings.
+ if (opCode == OpTypeImage) {
+ out << " ";
+ disassembleIds(1);
+ out << " " << DimensionString((Dim)stream[word++]);
+ out << (stream[word++] != 0 ? " depth" : "");
+ out << (stream[word++] != 0 ? " array" : "");
+ out << (stream[word++] != 0 ? " multi-sampled" : "");
+ switch (stream[word++]) {
+ case 0: out << " runtime"; break;
+ case 1: out << " sampled"; break;
+ case 2: out << " nonsampled"; break;
+ }
+ out << " format:" << ImageFormatString((ImageFormat)stream[word++]);
+
+ if (numOperands == 8) {
+ out << " " << AccessQualifierString(stream[word++]);
+ }
+ return;
+ }
+
+ // Handle all the parameterized operands
+ for (int op = 0; op < InstructionDesc[opCode].operands.getNum() && numOperands > 0; ++op) {
+ out << " ";
+ OperandClass operandClass = InstructionDesc[opCode].operands.getClass(op);
+ switch (operandClass) {
+ case OperandId:
+ case OperandScope:
+ case OperandMemorySemantics:
+ disassembleIds(1);
+ --numOperands;
+ // Get names for printing "(XXX)" for readability, *after* this id
+ if (opCode == OpName)
+ idDescriptor[stream[word - 1]] = (const char*)(&stream[word]);
+ break;
+ case OperandVariableIds:
+ disassembleIds(numOperands);
+ return;
+ case OperandImageOperands:
+ outputMask(OperandImageOperands, stream[word++]);
+ --numOperands;
+ disassembleIds(numOperands);
+ return;
+ case OperandOptionalLiteral:
+ case OperandVariableLiterals:
+ if ((opCode == OpDecorate && stream[word - 1] == DecorationBuiltIn) ||
+ (opCode == OpMemberDecorate && stream[word - 1] == DecorationBuiltIn)) {
+ out << BuiltInString(stream[word++]);
+ --numOperands;
+ ++op;
+ }
+ disassembleImmediates(numOperands);
+ return;
+ case OperandVariableIdLiteral:
+ while (numOperands > 0) {
+ out << std::endl;
+ outputResultId(0);
+ outputTypeId(0);
+ outputIndent();
+ out << " Type ";
+ disassembleIds(1);
+ out << ", member ";
+ disassembleImmediates(1);
+ numOperands -= 2;
+ }
+ return;
+ case OperandVariableLiteralId:
+ while (numOperands > 0) {
+ out << std::endl;
+ outputResultId(0);
+ outputTypeId(0);
+ outputIndent();
+ out << " case ";
+ disassembleImmediates(1);
+ out << ": ";
+ disassembleIds(1);
+ numOperands -= 2;
+ }
+ return;
+ case OperandLiteralNumber:
+ disassembleImmediates(1);
+ --numOperands;
+ if (opCode == OpExtInst) {
+ ExtInstSet extInstSet = GLSL450Inst;
+ const char* name = idDescriptor[stream[word - 2]].c_str();
+ if (0 == memcmp("OpenCL", name, 6)) {
+ extInstSet = OpenCLExtInst;
+#ifdef AMD_EXTENSIONS
+ } else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
+ strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
+ strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
+ strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
+ extInstSet = GLSLextAMDInst;
+#endif
+#ifdef NV_EXTENSIONS
+            } else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
+ strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
+ strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
+ strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
+ strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
+ strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) {
+ extInstSet = GLSLextNVInst;
+#endif
+ }
+ unsigned entrypoint = stream[word - 1];
+ if (extInstSet == GLSL450Inst) {
+ if (entrypoint < GLSLstd450Count) {
+ out << "(" << GlslStd450DebugNames[entrypoint] << ")";
+ }
+#ifdef AMD_EXTENSIONS
+ } else if (extInstSet == GLSLextAMDInst) {
+ out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
+#endif
+#ifdef NV_EXTENSIONS
+            } else if (extInstSet == GLSLextNVInst) {
+ out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
+#endif
+ }
+ }
+ break;
+ case OperandOptionalLiteralString:
+ case OperandLiteralString:
+ numOperands -= disassembleString();
+ break;
+ case OperandMemoryAccess:
+ outputMask(OperandMemoryAccess, stream[word++]);
+ --numOperands;
+ // Aligned is the only memory access operand that uses an immediate
+ // value, and it is also the first operand that uses a value at all.
+ if (stream[word-1] & MemoryAccessAlignedMask) {
+ disassembleImmediates(1);
+ numOperands--;
+ if (numOperands)
+ out << " ";
+ }
+ disassembleIds(numOperands);
+ return;
+ default:
+ assert(operandClass >= OperandSource && operandClass < OperandOpcode);
+
+ if (OperandClassParams[operandClass].bitmask)
+ outputMask(operandClass, stream[word++]);
+ else
+ out << OperandClassParams[operandClass].getName(stream[word++]);
+ --numOperands;
+
+ break;
+ }
+ }
+
+ return;
+}
+
+static void GLSLstd450GetDebugNames(const char** names)
+{
+ for (int i = 0; i < GLSLstd450Count; ++i)
+ names[i] = "Unknown";
+
+ names[GLSLstd450Round] = "Round";
+ names[GLSLstd450RoundEven] = "RoundEven";
+ names[GLSLstd450Trunc] = "Trunc";
+ names[GLSLstd450FAbs] = "FAbs";
+ names[GLSLstd450SAbs] = "SAbs";
+ names[GLSLstd450FSign] = "FSign";
+ names[GLSLstd450SSign] = "SSign";
+ names[GLSLstd450Floor] = "Floor";
+ names[GLSLstd450Ceil] = "Ceil";
+ names[GLSLstd450Fract] = "Fract";
+ names[GLSLstd450Radians] = "Radians";
+ names[GLSLstd450Degrees] = "Degrees";
+ names[GLSLstd450Sin] = "Sin";
+ names[GLSLstd450Cos] = "Cos";
+ names[GLSLstd450Tan] = "Tan";
+ names[GLSLstd450Asin] = "Asin";
+ names[GLSLstd450Acos] = "Acos";
+ names[GLSLstd450Atan] = "Atan";
+ names[GLSLstd450Sinh] = "Sinh";
+ names[GLSLstd450Cosh] = "Cosh";
+ names[GLSLstd450Tanh] = "Tanh";
+ names[GLSLstd450Asinh] = "Asinh";
+ names[GLSLstd450Acosh] = "Acosh";
+ names[GLSLstd450Atanh] = "Atanh";
+ names[GLSLstd450Atan2] = "Atan2";
+ names[GLSLstd450Pow] = "Pow";
+ names[GLSLstd450Exp] = "Exp";
+ names[GLSLstd450Log] = "Log";
+ names[GLSLstd450Exp2] = "Exp2";
+ names[GLSLstd450Log2] = "Log2";
+ names[GLSLstd450Sqrt] = "Sqrt";
+ names[GLSLstd450InverseSqrt] = "InverseSqrt";
+ names[GLSLstd450Determinant] = "Determinant";
+ names[GLSLstd450MatrixInverse] = "MatrixInverse";
+ names[GLSLstd450Modf] = "Modf";
+ names[GLSLstd450ModfStruct] = "ModfStruct";
+ names[GLSLstd450FMin] = "FMin";
+ names[GLSLstd450SMin] = "SMin";
+ names[GLSLstd450UMin] = "UMin";
+ names[GLSLstd450FMax] = "FMax";
+ names[GLSLstd450SMax] = "SMax";
+ names[GLSLstd450UMax] = "UMax";
+ names[GLSLstd450FClamp] = "FClamp";
+ names[GLSLstd450SClamp] = "SClamp";
+ names[GLSLstd450UClamp] = "UClamp";
+ names[GLSLstd450FMix] = "FMix";
+ names[GLSLstd450Step] = "Step";
+ names[GLSLstd450SmoothStep] = "SmoothStep";
+ names[GLSLstd450Fma] = "Fma";
+ names[GLSLstd450Frexp] = "Frexp";
+ names[GLSLstd450FrexpStruct] = "FrexpStruct";
+ names[GLSLstd450Ldexp] = "Ldexp";
+ names[GLSLstd450PackSnorm4x8] = "PackSnorm4x8";
+ names[GLSLstd450PackUnorm4x8] = "PackUnorm4x8";
+ names[GLSLstd450PackSnorm2x16] = "PackSnorm2x16";
+ names[GLSLstd450PackUnorm2x16] = "PackUnorm2x16";
+ names[GLSLstd450PackHalf2x16] = "PackHalf2x16";
+ names[GLSLstd450PackDouble2x32] = "PackDouble2x32";
+ names[GLSLstd450UnpackSnorm2x16] = "UnpackSnorm2x16";
+ names[GLSLstd450UnpackUnorm2x16] = "UnpackUnorm2x16";
+ names[GLSLstd450UnpackHalf2x16] = "UnpackHalf2x16";
+ names[GLSLstd450UnpackSnorm4x8] = "UnpackSnorm4x8";
+ names[GLSLstd450UnpackUnorm4x8] = "UnpackUnorm4x8";
+ names[GLSLstd450UnpackDouble2x32] = "UnpackDouble2x32";
+ names[GLSLstd450Length] = "Length";
+ names[GLSLstd450Distance] = "Distance";
+ names[GLSLstd450Cross] = "Cross";
+ names[GLSLstd450Normalize] = "Normalize";
+ names[GLSLstd450FaceForward] = "FaceForward";
+ names[GLSLstd450Reflect] = "Reflect";
+ names[GLSLstd450Refract] = "Refract";
+ names[GLSLstd450FindILsb] = "FindILsb";
+ names[GLSLstd450FindSMsb] = "FindSMsb";
+ names[GLSLstd450FindUMsb] = "FindUMsb";
+ names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
+ names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
+ names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
+}
+
+#ifdef AMD_EXTENSIONS
+static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
+{
+ if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
+ switch (entrypoint) {
+ case SwizzleInvocationsAMD: return "SwizzleInvocationsAMD";
+ case SwizzleInvocationsMaskedAMD: return "SwizzleInvocationsMaskedAMD";
+ case WriteInvocationAMD: return "WriteInvocationAMD";
+ case MbcntAMD: return "MbcntAMD";
+ default: return "Bad";
+ }
+ } else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) {
+ switch (entrypoint) {
+ case FMin3AMD: return "FMin3AMD";
+ case UMin3AMD: return "UMin3AMD";
+ case SMin3AMD: return "SMin3AMD";
+ case FMax3AMD: return "FMax3AMD";
+ case UMax3AMD: return "UMax3AMD";
+ case SMax3AMD: return "SMax3AMD";
+ case FMid3AMD: return "FMid3AMD";
+ case UMid3AMD: return "UMid3AMD";
+ case SMid3AMD: return "SMid3AMD";
+ default: return "Bad";
+ }
+ } else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) {
+ switch (entrypoint) {
+ case InterpolateAtVertexAMD: return "InterpolateAtVertexAMD";
+ default: return "Bad";
+ }
+    } else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) {
+ switch (entrypoint) {
+ case CubeFaceIndexAMD: return "CubeFaceIndexAMD";
+ case CubeFaceCoordAMD: return "CubeFaceCoordAMD";
+ case TimeAMD: return "TimeAMD";
+ default:
+ break;
+ }
+ }
+
+ return "Bad";
+}
+#endif
+
+#ifdef NV_EXTENSIONS
+static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
+{
+ if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
+ strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
+ strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
+ strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
+        strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 ||
+        strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 ||
+ strcmp(name, spv::E_SPV_NV_mesh_shader) == 0) {
+ switch (entrypoint) {
+ // NV builtins
+ case BuiltInViewportMaskNV: return "ViewportMaskNV";
+ case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
+ case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case BuiltInPositionPerViewNV: return "PositionPerViewNV";
+ case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+ case BuiltInBaryCoordNV: return "BaryCoordNV";
+ case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+ case BuiltInTaskCountNV: return "TaskCountNV";
+ case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
+ case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case BuiltInLayerPerViewNV: return "LayerPerViewNV";
+ case BuiltInMeshViewCountNV: return "MeshViewCountNV";
+ case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
+
+ // NV Capabilities
+ case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
+ case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
+ case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
+ case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
+ case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
+ case CapabilityMeshShadingNV: return "MeshShadingNV";
+
+ // NV Decorations
+ case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
+ case DecorationPassthroughNV: return "PassthroughNV";
+ case DecorationViewportRelativeNV: return "ViewportRelativeNV";
+ case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
+ case DecorationPerVertexNV: return "PerVertexNV";
+ case DecorationPerPrimitiveNV: return "PerPrimitiveNV";
+ case DecorationPerViewNV: return "PerViewNV";
+ case DecorationPerTaskNV: return "PerTaskNV";
+
+ default: return "Bad";
+ }
+ }
+ return "Bad";
+}
+#endif
+
+void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
+{
+    SpirvStream spirvStream(out, stream);
+ spv::Parameterize();
+ GLSLstd450GetDebugNames(GlslStd450DebugNames);
+    spirvStream.validate();
+    spirvStream.processInstructions();
+}
+
+}; // end namespace spv
diff --git a/src/3rdparty/glslang/SPIRV/disassemble.h b/src/3rdparty/glslang/SPIRV/disassemble.h
new file mode 100644
index 0000000..b6a4635
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/disassemble.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Disassembler for SPIR-V.
+//
+
+#pragma once
+#ifndef disassembler_H
+#define disassembler_H
+
+#include <iostream>
+#include <vector>
+
+namespace spv {
+
+ // disassemble with glslang custom disassembler
+ void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
+
+} // end namespace spv
+
+#endif // disassembler_H
diff --git a/src/3rdparty/glslang/SPIRV/doc.cpp b/src/3rdparty/glslang/SPIRV/doc.cpp
new file mode 100644
index 0000000..76e1df8
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/doc.cpp
@@ -0,0 +1,2757 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// 1) Programmatically fill in instruction/operand information.
+// This can be used for disassembly, printing documentation, etc.
+//
+// 2) Print documentation from this parameterization.
+//
+
+#include "doc.h"
+
+#include <cstdio>
+#include <cstring>
+#include <algorithm>
+
+namespace spv {
+ extern "C" {
+ // Include C-based headers that don't have a namespace
+ #include "GLSL.ext.KHR.h"
+ #include "GLSL.ext.EXT.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+#ifdef NV_EXTENSIONS
+ #include "GLSL.ext.NV.h"
+#endif
+ }
+}
+
+namespace spv {
+
+//
+// Whole set of functions that translate enumerants to their text strings for
+// the specification (or their sanitized versions for auto-generating the
+// SPIR-V headers).
+//
+// Also, for masks the ceilings are declared next to these, to help keep them in sync.
+// Ceilings should be
+// - one more than the maximum value an enumerant takes on, for non-mask enumerants
+// (for non-sparse enums, this is the number of enumerants)
+// - the number of bits consumed by the set of masks
+// (for non-sparse mask enums, this is the number of enumerants)
+//
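+// For example, DecorationCeiling below is 45: Alignment (44) is the highest
+// densely numbered decoration handled here, one less than the ceiling.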
+
+const char* SourceString(int source)
+{
+ switch (source) {
+ case 0: return "Unknown";
+ case 1: return "ESSL";
+ case 2: return "GLSL";
+ case 3: return "OpenCL_C";
+ case 4: return "OpenCL_CPP";
+ case 5: return "HLSL";
+
+ default: return "Bad";
+ }
+}
+
+const char* ExecutionModelString(int model)
+{
+ switch (model) {
+ case 0: return "Vertex";
+ case 1: return "TessellationControl";
+ case 2: return "TessellationEvaluation";
+ case 3: return "Geometry";
+ case 4: return "Fragment";
+ case 5: return "GLCompute";
+ case 6: return "Kernel";
+#ifdef NV_EXTENSIONS
+ case ExecutionModelTaskNV: return "TaskNV";
+ case ExecutionModelMeshNV: return "MeshNV";
+    case ExecutionModelRayGenerationNV: return "RayGenerationNV";
+    case ExecutionModelIntersectionNV: return "IntersectionNV";
+    case ExecutionModelAnyHitNV: return "AnyHitNV";
+    case ExecutionModelClosestHitNV: return "ClosestHitNV";
+    case ExecutionModelMissNV: return "MissNV";
+    case ExecutionModelCallableNV: return "CallableNV";
+#endif
+
+    default: return "Bad";
+    }
+}
+
+const char* AddressingString(int addr)
+{
+ switch (addr) {
+ case 0: return "Logical";
+ case 1: return "Physical32";
+ case 2: return "Physical64";
+
+ case AddressingModelPhysicalStorageBuffer64EXT: return "PhysicalStorageBuffer64EXT";
+
+ default: return "Bad";
+ }
+}
+
+const char* MemoryString(int mem)
+{
+ switch (mem) {
+ case MemoryModelSimple: return "Simple";
+ case MemoryModelGLSL450: return "GLSL450";
+ case MemoryModelOpenCL: return "OpenCL";
+ case MemoryModelVulkanKHR: return "VulkanKHR";
+
+ default: return "Bad";
+ }
+}
+
+const int ExecutionModeCeiling = 33;
+
+const char* ExecutionModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "Invocations";
+ case 1: return "SpacingEqual";
+ case 2: return "SpacingFractionalEven";
+ case 3: return "SpacingFractionalOdd";
+ case 4: return "VertexOrderCw";
+ case 5: return "VertexOrderCcw";
+ case 6: return "PixelCenterInteger";
+ case 7: return "OriginUpperLeft";
+ case 8: return "OriginLowerLeft";
+ case 9: return "EarlyFragmentTests";
+ case 10: return "PointMode";
+ case 11: return "Xfb";
+ case 12: return "DepthReplacing";
+ case 13: return "Bad";
+ case 14: return "DepthGreater";
+ case 15: return "DepthLess";
+ case 16: return "DepthUnchanged";
+ case 17: return "LocalSize";
+ case 18: return "LocalSizeHint";
+ case 19: return "InputPoints";
+ case 20: return "InputLines";
+ case 21: return "InputLinesAdjacency";
+ case 22: return "Triangles";
+ case 23: return "InputTrianglesAdjacency";
+ case 24: return "Quads";
+ case 25: return "Isolines";
+ case 26: return "OutputVertices";
+ case 27: return "OutputPoints";
+ case 28: return "OutputLineStrip";
+ case 29: return "OutputTriangleStrip";
+ case 30: return "VecTypeHint";
+ case 31: return "ContractionOff";
+ case 32: return "Bad";
+
+ case 4446: return "PostDepthCoverage";
+
+#ifdef NV_EXTENSIONS
+ case ExecutionModeOutputLinesNV: return "OutputLinesNV";
+ case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV";
+ case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV";
+ case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV";
+ case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV";
+#endif
+
+ case ExecutionModeCeiling:
+ default: return "Bad";
+ }
+}
+
+const char* StorageClassString(int StorageClass)
+{
+ switch (StorageClass) {
+ case 0: return "UniformConstant";
+ case 1: return "Input";
+ case 2: return "Uniform";
+ case 3: return "Output";
+ case 4: return "Workgroup";
+ case 5: return "CrossWorkgroup";
+ case 6: return "Private";
+ case 7: return "Function";
+ case 8: return "Generic";
+ case 9: return "PushConstant";
+ case 10: return "AtomicCounter";
+ case 11: return "Image";
+ case 12: return "StorageBuffer";
+
+#ifdef NV_EXTENSIONS
+ case StorageClassRayPayloadNV: return "RayPayloadNV";
+ case StorageClassHitAttributeNV: return "HitAttributeNV";
+ case StorageClassIncomingRayPayloadNV: return "IncomingRayPayloadNV";
+ case StorageClassShaderRecordBufferNV: return "ShaderRecordBufferNV";
+ case StorageClassCallableDataNV: return "CallableDataNV";
+ case StorageClassIncomingCallableDataNV: return "IncomingCallableDataNV";
+#endif
+
+ case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT";
+
+ default: return "Bad";
+ }
+}
+
+const int DecorationCeiling = 45;
+
+const char* DecorationString(int decoration)
+{
+ switch (decoration) {
+ case 0: return "RelaxedPrecision";
+ case 1: return "SpecId";
+ case 2: return "Block";
+ case 3: return "BufferBlock";
+ case 4: return "RowMajor";
+ case 5: return "ColMajor";
+ case 6: return "ArrayStride";
+ case 7: return "MatrixStride";
+ case 8: return "GLSLShared";
+ case 9: return "GLSLPacked";
+ case 10: return "CPacked";
+ case 11: return "BuiltIn";
+ case 12: return "Bad";
+ case 13: return "NoPerspective";
+ case 14: return "Flat";
+ case 15: return "Patch";
+ case 16: return "Centroid";
+ case 17: return "Sample";
+ case 18: return "Invariant";
+ case 19: return "Restrict";
+ case 20: return "Aliased";
+ case 21: return "Volatile";
+ case 22: return "Constant";
+ case 23: return "Coherent";
+ case 24: return "NonWritable";
+ case 25: return "NonReadable";
+ case 26: return "Uniform";
+ case 27: return "Bad";
+ case 28: return "SaturatedConversion";
+ case 29: return "Stream";
+ case 30: return "Location";
+ case 31: return "Component";
+ case 32: return "Index";
+ case 33: return "Binding";
+ case 34: return "DescriptorSet";
+ case 35: return "Offset";
+ case 36: return "XfbBuffer";
+ case 37: return "XfbStride";
+ case 38: return "FuncParamAttr";
+ case 39: return "FP Rounding Mode";
+ case 40: return "FP Fast Math Mode";
+ case 41: return "Linkage Attributes";
+ case 42: return "NoContraction";
+ case 43: return "InputAttachmentIndex";
+ case 44: return "Alignment";
+
+#ifdef AMD_EXTENSIONS
+    case DecorationExplicitInterpAMD: return "ExplicitInterpAMD";
+#endif
+#ifdef NV_EXTENSIONS
+    case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
+    case DecorationPassthroughNV: return "PassthroughNV";
+    case DecorationViewportRelativeNV: return "ViewportRelativeNV";
+    case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
+    case DecorationPerPrimitiveNV: return "PerPrimitiveNV";
+    case DecorationPerViewNV: return "PerViewNV";
+    case DecorationPerTaskNV: return "PerTaskNV";
+    case DecorationPerVertexNV: return "PerVertexNV";
+#endif
+
+    case DecorationNonUniformEXT: return "NonUniformEXT";
+    case DecorationHlslCounterBufferGOOGLE: return "HlslCounterBufferGOOGLE";
+    case DecorationHlslSemanticGOOGLE: return "HlslSemanticGOOGLE";
+    case DecorationRestrictPointerEXT: return "RestrictPointerEXT";
+    case DecorationAliasedPointerEXT: return "AliasedPointerEXT";
+
+    case DecorationCeiling:
+    default: return "Bad";
+ }
+}
+
+const char* BuiltInString(int builtIn)
+{
+ switch (builtIn) {
+ case 0: return "Position";
+ case 1: return "PointSize";
+ case 2: return "Bad";
+ case 3: return "ClipDistance";
+ case 4: return "CullDistance";
+ case 5: return "VertexId";
+ case 6: return "InstanceId";
+ case 7: return "PrimitiveId";
+ case 8: return "InvocationId";
+ case 9: return "Layer";
+ case 10: return "ViewportIndex";
+ case 11: return "TessLevelOuter";
+ case 12: return "TessLevelInner";
+ case 13: return "TessCoord";
+ case 14: return "PatchVertices";
+ case 15: return "FragCoord";
+ case 16: return "PointCoord";
+ case 17: return "FrontFacing";
+ case 18: return "SampleId";
+ case 19: return "SamplePosition";
+ case 20: return "SampleMask";
+ case 21: return "Bad";
+ case 22: return "FragDepth";
+ case 23: return "HelperInvocation";
+ case 24: return "NumWorkgroups";
+ case 25: return "WorkgroupSize";
+ case 26: return "WorkgroupId";
+ case 27: return "LocalInvocationId";
+ case 28: return "GlobalInvocationId";
+ case 29: return "LocalInvocationIndex";
+ case 30: return "WorkDim";
+ case 31: return "GlobalSize";
+ case 32: return "EnqueuedWorkgroupSize";
+ case 33: return "GlobalOffset";
+ case 34: return "GlobalLinearId";
+ case 35: return "Bad";
+ case 36: return "SubgroupSize";
+ case 37: return "SubgroupMaxSize";
+ case 38: return "NumSubgroups";
+ case 39: return "NumEnqueuedSubgroups";
+ case 40: return "SubgroupId";
+ case 41: return "SubgroupLocalInvocationId";
+ case 42: return "VertexIndex"; // TBD: put next to VertexId?
+ case 43: return "InstanceIndex"; // TBD: put next to InstanceId?
+
+ case 4416: return "SubgroupEqMaskKHR";
+ case 4417: return "SubgroupGeMaskKHR";
+ case 4418: return "SubgroupGtMaskKHR";
+ case 4419: return "SubgroupLeMaskKHR";
+ case 4420: return "SubgroupLtMaskKHR";
+ case 4438: return "DeviceIndex";
+ case 4440: return "ViewIndex";
+ case 4424: return "BaseVertex";
+ case 4425: return "BaseInstance";
+ case 4426: return "DrawIndex";
+ case 5014: return "FragStencilRefEXT";
+
+#ifdef AMD_EXTENSIONS
+ case 4992: return "BaryCoordNoPerspAMD";
+ case 4993: return "BaryCoordNoPerspCentroidAMD";
+ case 4994: return "BaryCoordNoPerspSampleAMD";
+ case 4995: return "BaryCoordSmoothAMD";
+ case 4996: return "BaryCoordSmoothCentroidAMD";
+ case 4997: return "BaryCoordSmoothSampleAMD";
+ case 4998: return "BaryCoordPullModelAMD";
+#endif
+
+#ifdef NV_EXTENSIONS
+ case BuiltInLaunchIdNV: return "LaunchIdNV";
+ case BuiltInLaunchSizeNV: return "LaunchSizeNV";
+ case BuiltInWorldRayOriginNV: return "WorldRayOriginNV";
+ case BuiltInWorldRayDirectionNV: return "WorldRayDirectionNV";
+ case BuiltInObjectRayOriginNV: return "ObjectRayOriginNV";
+ case BuiltInObjectRayDirectionNV: return "ObjectRayDirectionNV";
+ case BuiltInRayTminNV: return "RayTminNV";
+ case BuiltInRayTmaxNV: return "RayTmaxNV";
+ case BuiltInInstanceCustomIndexNV: return "InstanceCustomIndexNV";
+ case BuiltInObjectToWorldNV: return "ObjectToWorldNV";
+ case BuiltInWorldToObjectNV: return "WorldToObjectNV";
+ case BuiltInHitTNV: return "HitTNV";
+ case BuiltInHitKindNV: return "HitKindNV";
+ case BuiltInIncomingRayFlagsNV: return "IncomingRayFlagsNV";
+ case BuiltInViewportMaskNV: return "ViewportMaskNV";
+ case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
+ case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case BuiltInPositionPerViewNV: return "PositionPerViewNV";
+ case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+// case BuiltInFragmentSizeNV: return "FragmentSizeNV"; // superseded by BuiltInFragSizeEXT
+// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT
+ case BuiltInBaryCoordNV: return "BaryCoordNV";
+ case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+#endif
+
+ case BuiltInFragSizeEXT: return "FragSizeEXT";
+ case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT";
+
+ case 5264: return "FullyCoveredEXT";
+
+#ifdef NV_EXTENSIONS
+ case BuiltInTaskCountNV: return "TaskCountNV";
+ case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
+ case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case BuiltInLayerPerViewNV: return "LayerPerViewNV";
+ case BuiltInMeshViewCountNV: return "MeshViewCountNV";
+ case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
+#endif
+
+ default: return "Bad";
+ }
+}
+
+const char* DimensionString(int dim)
+{
+ switch (dim) {
+ case 0: return "1D";
+ case 1: return "2D";
+ case 2: return "3D";
+ case 3: return "Cube";
+ case 4: return "Rect";
+ case 5: return "Buffer";
+ case 6: return "SubpassData";
+
+ default: return "Bad";
+ }
+}
+
+const char* SamplerAddressingModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "None";
+ case 1: return "ClampToEdge";
+ case 2: return "Clamp";
+ case 3: return "Repeat";
+ case 4: return "RepeatMirrored";
+
+ default: return "Bad";
+ }
+}
+
+const char* SamplerFilterModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "Nearest";
+ case 1: return "Linear";
+
+ default: return "Bad";
+ }
+}
+
+const char* ImageFormatString(int format)
+{
+ switch (format) {
+ case 0: return "Unknown";
+
+ // ES/Desktop float
+ case 1: return "Rgba32f";
+ case 2: return "Rgba16f";
+ case 3: return "R32f";
+ case 4: return "Rgba8";
+ case 5: return "Rgba8Snorm";
+
+ // Desktop float
+ case 6: return "Rg32f";
+ case 7: return "Rg16f";
+ case 8: return "R11fG11fB10f";
+ case 9: return "R16f";
+ case 10: return "Rgba16";
+ case 11: return "Rgb10A2";
+ case 12: return "Rg16";
+ case 13: return "Rg8";
+ case 14: return "R16";
+ case 15: return "R8";
+ case 16: return "Rgba16Snorm";
+ case 17: return "Rg16Snorm";
+ case 18: return "Rg8Snorm";
+ case 19: return "R16Snorm";
+ case 20: return "R8Snorm";
+
+ // ES/Desktop int
+ case 21: return "Rgba32i";
+ case 22: return "Rgba16i";
+ case 23: return "Rgba8i";
+ case 24: return "R32i";
+
+ // Desktop int
+ case 25: return "Rg32i";
+ case 26: return "Rg16i";
+ case 27: return "Rg8i";
+ case 28: return "R16i";
+ case 29: return "R8i";
+
+ // ES/Desktop uint
+ case 30: return "Rgba32ui";
+ case 31: return "Rgba16ui";
+ case 32: return "Rgba8ui";
+ case 33: return "R32ui";
+
+ // Desktop uint
+ case 34: return "Rgb10a2ui";
+ case 35: return "Rg32ui";
+ case 36: return "Rg16ui";
+ case 37: return "Rg8ui";
+ case 38: return "R16ui";
+ case 39: return "R8ui";
+
+ default:
+ return "Bad";
+ }
+}
+
+const char* ImageChannelOrderString(int format)
+{
+ switch (format) {
+ case 0: return "R";
+ case 1: return "A";
+ case 2: return "RG";
+ case 3: return "RA";
+ case 4: return "RGB";
+ case 5: return "RGBA";
+ case 6: return "BGRA";
+ case 7: return "ARGB";
+ case 8: return "Intensity";
+ case 9: return "Luminance";
+ case 10: return "Rx";
+ case 11: return "RGx";
+ case 12: return "RGBx";
+ case 13: return "Depth";
+ case 14: return "DepthStencil";
+ case 15: return "sRGB";
+ case 16: return "sRGBx";
+ case 17: return "sRGBA";
+ case 18: return "sBGRA";
+
+ default:
+ return "Bad";
+ }
+}
+
+const char* ImageChannelDataTypeString(int type)
+{
+ switch (type)
+ {
+ case 0: return "SnormInt8";
+ case 1: return "SnormInt16";
+ case 2: return "UnormInt8";
+ case 3: return "UnormInt16";
+ case 4: return "UnormShort565";
+ case 5: return "UnormShort555";
+ case 6: return "UnormInt101010";
+ case 7: return "SignedInt8";
+ case 8: return "SignedInt16";
+ case 9: return "SignedInt32";
+ case 10: return "UnsignedInt8";
+ case 11: return "UnsignedInt16";
+ case 12: return "UnsignedInt32";
+ case 13: return "HalfFloat";
+ case 14: return "Float";
+ case 15: return "UnormInt24";
+ case 16: return "UnormInt101010_2";
+
+ default:
+ return "Bad";
+ }
+}
+
+const int ImageOperandsCeiling = 12;
+
+const char* ImageOperandsString(int format)
+{
+ switch (format) {
+ case ImageOperandsBiasShift: return "Bias";
+ case ImageOperandsLodShift: return "Lod";
+ case ImageOperandsGradShift: return "Grad";
+ case ImageOperandsConstOffsetShift: return "ConstOffset";
+ case ImageOperandsOffsetShift: return "Offset";
+ case ImageOperandsConstOffsetsShift: return "ConstOffsets";
+ case ImageOperandsSampleShift: return "Sample";
+ case ImageOperandsMinLodShift: return "MinLod";
+ case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR";
+ case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR";
+ case ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR";
+ case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR";
+
+ case ImageOperandsCeiling:
+ default:
+ return "Bad";
+ }
+}
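+
+// Note the cases above are bit positions (the ...Shift enumerants), not mask
+// values. A sketch of decoding a whole mask word (hypothetical caller):
+//
+//     for (int bit = 0; bit < ImageOperandsCeiling; ++bit)
+//         if (mask & (1 << bit))
+//             printf(" %s", ImageOperandsString(bit));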
+
+const char* FPFastMathString(int mode)
+{
+ switch (mode) {
+ case 0: return "NotNaN";
+ case 1: return "NotInf";
+ case 2: return "NSZ";
+ case 3: return "AllowRecip";
+ case 4: return "Fast";
+
+ default: return "Bad";
+ }
+}
+
+const char* FPRoundingModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "RTE";
+ case 1: return "RTZ";
+ case 2: return "RTP";
+ case 3: return "RTN";
+
+ default: return "Bad";
+ }
+}
+
+const char* LinkageTypeString(int type)
+{
+ switch (type) {
+ case 0: return "Export";
+ case 1: return "Import";
+
+ default: return "Bad";
+ }
+}
+
+const char* FuncParamAttrString(int attr)
+{
+ switch (attr) {
+ case 0: return "Zext";
+ case 1: return "Sext";
+ case 2: return "ByVal";
+ case 3: return "Sret";
+ case 4: return "NoAlias";
+ case 5: return "NoCapture";
+ case 6: return "NoWrite";
+ case 7: return "NoReadWrite";
+
+ default: return "Bad";
+ }
+}
+
+const char* AccessQualifierString(int attr)
+{
+ switch (attr) {
+ case 0: return "ReadOnly";
+ case 1: return "WriteOnly";
+ case 2: return "ReadWrite";
+
+ default: return "Bad";
+ }
+}
+
+const int SelectControlCeiling = 2;
+
+const char* SelectControlString(int cont)
+{
+ switch (cont) {
+ case 0: return "Flatten";
+ case 1: return "DontFlatten";
+
+ case SelectControlCeiling:
+ default: return "Bad";
+ }
+}
+
+const int LoopControlCeiling = 4;
+
+const char* LoopControlString(int cont)
+{
+ switch (cont) {
+ case 0: return "Unroll";
+ case 1: return "DontUnroll";
+ case 2: return "DependencyInfinite";
+ case 3: return "DependencyLength";
+
+ case LoopControlCeiling:
+ default: return "Bad";
+ }
+}
+
+const int FunctionControlCeiling = 4;
+
+const char* FunctionControlString(int cont)
+{
+ switch (cont) {
+ case 0: return "Inline";
+ case 1: return "DontInline";
+ case 2: return "Pure";
+ case 3: return "Const";
+
+ case FunctionControlCeiling:
+ default: return "Bad";
+ }
+}
+
+const char* MemorySemanticsString(int mem)
+{
+ // Note: No bits set (None) means "Relaxed"
+ switch (mem) {
+ case 0: return "Bad"; // Note: this is a placeholder for 'Consume'
+ case 1: return "Acquire";
+ case 2: return "Release";
+ case 3: return "AcquireRelease";
+ case 4: return "SequentiallyConsistent";
+ case 5: return "Bad"; // Note: reserved for future expansion
+ case 6: return "UniformMemory";
+ case 7: return "SubgroupMemory";
+ case 8: return "WorkgroupMemory";
+ case 9: return "CrossWorkgroupMemory";
+ case 10: return "AtomicCounterMemory";
+ case 11: return "ImageMemory";
+
+ default: return "Bad";
+ }
+}
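+
+// MemorySemantics is likewise a bit mask decoded per bit position; it is
+// registered with the mask flag set in Parameterize() below.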
+
+const int MemoryAccessCeiling = 6;
+
+const char* MemoryAccessString(int mem)
+{
+ switch (mem) {
+ case MemoryAccessVolatileShift: return "Volatile";
+ case MemoryAccessAlignedShift: return "Aligned";
+ case MemoryAccessNontemporalShift: return "Nontemporal";
+ case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR";
+ case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR";
+ case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR";
+
+ default: return "Bad";
+ }
+}
+
+const char* ScopeString(int mem)
+{
+ switch (mem) {
+ case 0: return "CrossDevice";
+ case 1: return "Device";
+ case 2: return "Workgroup";
+ case 3: return "Subgroup";
+ case 4: return "Invocation";
+
+ default: return "Bad";
+ }
+}
+
+const char* GroupOperationString(int gop)
+{
+ switch (gop)
+ {
+ case GroupOperationReduce: return "Reduce";
+ case GroupOperationInclusiveScan: return "InclusiveScan";
+ case GroupOperationExclusiveScan: return "ExclusiveScan";
+ case GroupOperationClusteredReduce: return "ClusteredReduce";
+#ifdef NV_EXTENSIONS
+ case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV";
+ case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV";
+ case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV";
+#endif
+
+ default: return "Bad";
+ }
+}
+
+const char* KernelEnqueueFlagsString(int flag)
+{
+ switch (flag)
+ {
+ case 0: return "NoWait";
+ case 1: return "WaitKernel";
+ case 2: return "WaitWorkGroup";
+
+ default: return "Bad";
+ }
+}
+
+const char* KernelProfilingInfoString(int info)
+{
+ switch (info)
+ {
+ case 0: return "CmdExecTime";
+
+ default: return "Bad";
+ }
+}
+
+const char* CapabilityString(int info)
+{
+ switch (info)
+ {
+ case 0: return "Matrix";
+ case 1: return "Shader";
+ case 2: return "Geometry";
+ case 3: return "Tessellation";
+ case 4: return "Addresses";
+ case 5: return "Linkage";
+ case 6: return "Kernel";
+ case 7: return "Vector16";
+ case 8: return "Float16Buffer";
+ case 9: return "Float16";
+ case 10: return "Float64";
+ case 11: return "Int64";
+ case 12: return "Int64Atomics";
+ case 13: return "ImageBasic";
+ case 14: return "ImageReadWrite";
+ case 15: return "ImageMipmap";
+ case 16: return "Bad";
+ case 17: return "Pipes";
+ case 18: return "Groups";
+ case 19: return "DeviceEnqueue";
+ case 20: return "LiteralSampler";
+ case 21: return "AtomicStorage";
+ case 22: return "Int16";
+ case 23: return "TessellationPointSize";
+ case 24: return "GeometryPointSize";
+ case 25: return "ImageGatherExtended";
+ case 26: return "Bad";
+ case 27: return "StorageImageMultisample";
+ case 28: return "UniformBufferArrayDynamicIndexing";
+ case 29: return "SampledImageArrayDynamicIndexing";
+ case 30: return "StorageBufferArrayDynamicIndexing";
+ case 31: return "StorageImageArrayDynamicIndexing";
+ case 32: return "ClipDistance";
+ case 33: return "CullDistance";
+ case 34: return "ImageCubeArray";
+ case 35: return "SampleRateShading";
+ case 36: return "ImageRect";
+ case 37: return "SampledRect";
+ case 38: return "GenericPointer";
+ case 39: return "Int8";
+ case 40: return "InputAttachment";
+ case 41: return "SparseResidency";
+ case 42: return "MinLod";
+ case 43: return "Sampled1D";
+ case 44: return "Image1D";
+ case 45: return "SampledCubeArray";
+ case 46: return "SampledBuffer";
+ case 47: return "ImageBuffer";
+ case 48: return "ImageMSArray";
+ case 49: return "StorageImageExtendedFormats";
+ case 50: return "ImageQuery";
+ case 51: return "DerivativeControl";
+ case 52: return "InterpolationFunction";
+ case 53: return "TransformFeedback";
+ case 54: return "GeometryStreams";
+ case 55: return "StorageImageReadWithoutFormat";
+ case 56: return "StorageImageWriteWithoutFormat";
+ case 57: return "MultiViewport";
+ case 61: return "GroupNonUniform";
+ case 62: return "GroupNonUniformVote";
+ case 63: return "GroupNonUniformArithmetic";
+ case 64: return "GroupNonUniformBallot";
+ case 65: return "GroupNonUniformShuffle";
+ case 66: return "GroupNonUniformShuffleRelative";
+ case 67: return "GroupNonUniformClustered";
+ case 68: return "GroupNonUniformQuad";
+
+ case CapabilitySubgroupBallotKHR: return "SubgroupBallotKHR";
+ case CapabilityDrawParameters: return "DrawParameters";
+ case CapabilitySubgroupVoteKHR: return "SubgroupVoteKHR";
+
+ case CapabilityStorageUniformBufferBlock16: return "StorageUniformBufferBlock16";
+ case CapabilityStorageUniform16: return "StorageUniform16";
+ case CapabilityStoragePushConstant16: return "StoragePushConstant16";
+ case CapabilityStorageInputOutput16: return "StorageInputOutput16";
+
+    case CapabilityStorageBuffer8BitAccess: return "StorageBuffer8BitAccess";
+    case CapabilityUniformAndStorageBuffer8BitAccess: return "UniformAndStorageBuffer8BitAccess";
+    case CapabilityStoragePushConstant8: return "StoragePushConstant8";
+
+ case CapabilityDeviceGroup: return "DeviceGroup";
+ case CapabilityMultiView: return "MultiView";
+
+ case CapabilityStencilExportEXT: return "StencilExportEXT";
+
+#ifdef AMD_EXTENSIONS
+ case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
+ case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
+ case CapabilityFragmentMaskAMD: return "FragmentMaskAMD";
+ case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD";
+#endif
+
+ case CapabilityAtomicStorageOps: return "AtomicStorageOps";
+
+ case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage";
+#ifdef NV_EXTENSIONS
+ case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
+ case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
+ case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
+ case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
+ case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
+ case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV";
+ case CapabilityRayTracingNV: return "RayTracingNV";
+ case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV";
+ case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV";
+ case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
+ case CapabilityMeshShadingNV: return "MeshShadingNV";
+// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by CapabilityFragmentDensityEXT
+#endif
+ case CapabilityFragmentDensityEXT: return "FragmentDensityEXT";
+
+ case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT";
+
+    case CapabilityShaderNonUniformEXT: return "ShaderNonUniformEXT";
+    case CapabilityRuntimeDescriptorArrayEXT: return "RuntimeDescriptorArrayEXT";
+    case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "InputAttachmentArrayDynamicIndexingEXT";
+    case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "UniformTexelBufferArrayDynamicIndexingEXT";
+    case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "StorageTexelBufferArrayDynamicIndexingEXT";
+    case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "UniformBufferArrayNonUniformIndexingEXT";
+    case CapabilitySampledImageArrayNonUniformIndexingEXT: return "SampledImageArrayNonUniformIndexingEXT";
+    case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "StorageBufferArrayNonUniformIndexingEXT";
+    case CapabilityStorageImageArrayNonUniformIndexingEXT: return "StorageImageArrayNonUniformIndexingEXT";
+    case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "InputAttachmentArrayNonUniformIndexingEXT";
+    case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "UniformTexelBufferArrayNonUniformIndexingEXT";
+    case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "StorageTexelBufferArrayNonUniformIndexingEXT";
+
+    case CapabilityVulkanMemoryModelKHR: return "VulkanMemoryModelKHR";
+    case CapabilityVulkanMemoryModelDeviceScopeKHR: return "VulkanMemoryModelDeviceScopeKHR";
+
+    case CapabilityPhysicalStorageBufferAddressesEXT: return "PhysicalStorageBufferAddressesEXT";
+
+    case CapabilityVariablePointers: return "VariablePointers";
+
+    case CapabilityCooperativeMatrixNV: return "CooperativeMatrixNV";
+
+ default: return "Bad";
+ }
+}
+
+const char* OpcodeString(int op)
+{
+ switch (op) {
+ case 0: return "OpNop";
+ case 1: return "OpUndef";
+ case 2: return "OpSourceContinued";
+ case 3: return "OpSource";
+ case 4: return "OpSourceExtension";
+ case 5: return "OpName";
+ case 6: return "OpMemberName";
+ case 7: return "OpString";
+ case 8: return "OpLine";
+ case 9: return "Bad";
+ case 10: return "OpExtension";
+ case 11: return "OpExtInstImport";
+ case 12: return "OpExtInst";
+ case 13: return "Bad";
+ case 14: return "OpMemoryModel";
+ case 15: return "OpEntryPoint";
+ case 16: return "OpExecutionMode";
+ case 17: return "OpCapability";
+ case 18: return "Bad";
+ case 19: return "OpTypeVoid";
+ case 20: return "OpTypeBool";
+ case 21: return "OpTypeInt";
+ case 22: return "OpTypeFloat";
+ case 23: return "OpTypeVector";
+ case 24: return "OpTypeMatrix";
+ case 25: return "OpTypeImage";
+ case 26: return "OpTypeSampler";
+ case 27: return "OpTypeSampledImage";
+ case 28: return "OpTypeArray";
+ case 29: return "OpTypeRuntimeArray";
+ case 30: return "OpTypeStruct";
+ case 31: return "OpTypeOpaque";
+ case 32: return "OpTypePointer";
+ case 33: return "OpTypeFunction";
+ case 34: return "OpTypeEvent";
+ case 35: return "OpTypeDeviceEvent";
+ case 36: return "OpTypeReserveId";
+ case 37: return "OpTypeQueue";
+ case 38: return "OpTypePipe";
+ case 39: return "OpTypeForwardPointer";
+ case 40: return "Bad";
+ case 41: return "OpConstantTrue";
+ case 42: return "OpConstantFalse";
+ case 43: return "OpConstant";
+ case 44: return "OpConstantComposite";
+ case 45: return "OpConstantSampler";
+ case 46: return "OpConstantNull";
+ case 47: return "Bad";
+ case 48: return "OpSpecConstantTrue";
+ case 49: return "OpSpecConstantFalse";
+ case 50: return "OpSpecConstant";
+ case 51: return "OpSpecConstantComposite";
+ case 52: return "OpSpecConstantOp";
+ case 53: return "Bad";
+ case 54: return "OpFunction";
+ case 55: return "OpFunctionParameter";
+ case 56: return "OpFunctionEnd";
+ case 57: return "OpFunctionCall";
+ case 58: return "Bad";
+ case 59: return "OpVariable";
+ case 60: return "OpImageTexelPointer";
+ case 61: return "OpLoad";
+ case 62: return "OpStore";
+ case 63: return "OpCopyMemory";
+ case 64: return "OpCopyMemorySized";
+ case 65: return "OpAccessChain";
+ case 66: return "OpInBoundsAccessChain";
+ case 67: return "OpPtrAccessChain";
+ case 68: return "OpArrayLength";
+ case 69: return "OpGenericPtrMemSemantics";
+ case 70: return "OpInBoundsPtrAccessChain";
+ case 71: return "OpDecorate";
+ case 72: return "OpMemberDecorate";
+ case 73: return "OpDecorationGroup";
+ case 74: return "OpGroupDecorate";
+ case 75: return "OpGroupMemberDecorate";
+ case 76: return "Bad";
+ case 77: return "OpVectorExtractDynamic";
+ case 78: return "OpVectorInsertDynamic";
+ case 79: return "OpVectorShuffle";
+ case 80: return "OpCompositeConstruct";
+ case 81: return "OpCompositeExtract";
+ case 82: return "OpCompositeInsert";
+ case 83: return "OpCopyObject";
+ case 84: return "OpTranspose";
+ case 85: return "Bad";
+ case 86: return "OpSampledImage";
+ case 87: return "OpImageSampleImplicitLod";
+ case 88: return "OpImageSampleExplicitLod";
+ case 89: return "OpImageSampleDrefImplicitLod";
+ case 90: return "OpImageSampleDrefExplicitLod";
+ case 91: return "OpImageSampleProjImplicitLod";
+ case 92: return "OpImageSampleProjExplicitLod";
+ case 93: return "OpImageSampleProjDrefImplicitLod";
+ case 94: return "OpImageSampleProjDrefExplicitLod";
+ case 95: return "OpImageFetch";
+ case 96: return "OpImageGather";
+ case 97: return "OpImageDrefGather";
+ case 98: return "OpImageRead";
+ case 99: return "OpImageWrite";
+ case 100: return "OpImage";
+ case 101: return "OpImageQueryFormat";
+ case 102: return "OpImageQueryOrder";
+ case 103: return "OpImageQuerySizeLod";
+ case 104: return "OpImageQuerySize";
+ case 105: return "OpImageQueryLod";
+ case 106: return "OpImageQueryLevels";
+ case 107: return "OpImageQuerySamples";
+ case 108: return "Bad";
+ case 109: return "OpConvertFToU";
+ case 110: return "OpConvertFToS";
+ case 111: return "OpConvertSToF";
+ case 112: return "OpConvertUToF";
+ case 113: return "OpUConvert";
+ case 114: return "OpSConvert";
+ case 115: return "OpFConvert";
+ case 116: return "OpQuantizeToF16";
+ case 117: return "OpConvertPtrToU";
+ case 118: return "OpSatConvertSToU";
+ case 119: return "OpSatConvertUToS";
+ case 120: return "OpConvertUToPtr";
+ case 121: return "OpPtrCastToGeneric";
+ case 122: return "OpGenericCastToPtr";
+ case 123: return "OpGenericCastToPtrExplicit";
+ case 124: return "OpBitcast";
+ case 125: return "Bad";
+ case 126: return "OpSNegate";
+ case 127: return "OpFNegate";
+ case 128: return "OpIAdd";
+ case 129: return "OpFAdd";
+ case 130: return "OpISub";
+ case 131: return "OpFSub";
+ case 132: return "OpIMul";
+ case 133: return "OpFMul";
+ case 134: return "OpUDiv";
+ case 135: return "OpSDiv";
+ case 136: return "OpFDiv";
+ case 137: return "OpUMod";
+ case 138: return "OpSRem";
+ case 139: return "OpSMod";
+ case 140: return "OpFRem";
+ case 141: return "OpFMod";
+ case 142: return "OpVectorTimesScalar";
+ case 143: return "OpMatrixTimesScalar";
+ case 144: return "OpVectorTimesMatrix";
+ case 145: return "OpMatrixTimesVector";
+ case 146: return "OpMatrixTimesMatrix";
+ case 147: return "OpOuterProduct";
+ case 148: return "OpDot";
+ case 149: return "OpIAddCarry";
+ case 150: return "OpISubBorrow";
+ case 151: return "OpUMulExtended";
+ case 152: return "OpSMulExtended";
+ case 153: return "Bad";
+ case 154: return "OpAny";
+ case 155: return "OpAll";
+ case 156: return "OpIsNan";
+ case 157: return "OpIsInf";
+ case 158: return "OpIsFinite";
+ case 159: return "OpIsNormal";
+ case 160: return "OpSignBitSet";
+ case 161: return "OpLessOrGreater";
+ case 162: return "OpOrdered";
+ case 163: return "OpUnordered";
+ case 164: return "OpLogicalEqual";
+ case 165: return "OpLogicalNotEqual";
+ case 166: return "OpLogicalOr";
+ case 167: return "OpLogicalAnd";
+ case 168: return "OpLogicalNot";
+ case 169: return "OpSelect";
+ case 170: return "OpIEqual";
+ case 171: return "OpINotEqual";
+ case 172: return "OpUGreaterThan";
+ case 173: return "OpSGreaterThan";
+ case 174: return "OpUGreaterThanEqual";
+ case 175: return "OpSGreaterThanEqual";
+ case 176: return "OpULessThan";
+ case 177: return "OpSLessThan";
+ case 178: return "OpULessThanEqual";
+ case 179: return "OpSLessThanEqual";
+ case 180: return "OpFOrdEqual";
+ case 181: return "OpFUnordEqual";
+ case 182: return "OpFOrdNotEqual";
+ case 183: return "OpFUnordNotEqual";
+ case 184: return "OpFOrdLessThan";
+ case 185: return "OpFUnordLessThan";
+ case 186: return "OpFOrdGreaterThan";
+ case 187: return "OpFUnordGreaterThan";
+ case 188: return "OpFOrdLessThanEqual";
+ case 189: return "OpFUnordLessThanEqual";
+ case 190: return "OpFOrdGreaterThanEqual";
+ case 191: return "OpFUnordGreaterThanEqual";
+ case 192: return "Bad";
+ case 193: return "Bad";
+ case 194: return "OpShiftRightLogical";
+ case 195: return "OpShiftRightArithmetic";
+ case 196: return "OpShiftLeftLogical";
+ case 197: return "OpBitwiseOr";
+ case 198: return "OpBitwiseXor";
+ case 199: return "OpBitwiseAnd";
+ case 200: return "OpNot";
+ case 201: return "OpBitFieldInsert";
+ case 202: return "OpBitFieldSExtract";
+ case 203: return "OpBitFieldUExtract";
+ case 204: return "OpBitReverse";
+ case 205: return "OpBitCount";
+ case 206: return "Bad";
+ case 207: return "OpDPdx";
+ case 208: return "OpDPdy";
+ case 209: return "OpFwidth";
+ case 210: return "OpDPdxFine";
+ case 211: return "OpDPdyFine";
+ case 212: return "OpFwidthFine";
+ case 213: return "OpDPdxCoarse";
+ case 214: return "OpDPdyCoarse";
+ case 215: return "OpFwidthCoarse";
+ case 216: return "Bad";
+ case 217: return "Bad";
+ case 218: return "OpEmitVertex";
+ case 219: return "OpEndPrimitive";
+ case 220: return "OpEmitStreamVertex";
+ case 221: return "OpEndStreamPrimitive";
+ case 222: return "Bad";
+ case 223: return "Bad";
+ case 224: return "OpControlBarrier";
+ case 225: return "OpMemoryBarrier";
+ case 226: return "Bad";
+ case 227: return "OpAtomicLoad";
+ case 228: return "OpAtomicStore";
+ case 229: return "OpAtomicExchange";
+ case 230: return "OpAtomicCompareExchange";
+ case 231: return "OpAtomicCompareExchangeWeak";
+ case 232: return "OpAtomicIIncrement";
+ case 233: return "OpAtomicIDecrement";
+ case 234: return "OpAtomicIAdd";
+ case 235: return "OpAtomicISub";
+ case 236: return "OpAtomicSMin";
+ case 237: return "OpAtomicUMin";
+ case 238: return "OpAtomicSMax";
+ case 239: return "OpAtomicUMax";
+ case 240: return "OpAtomicAnd";
+ case 241: return "OpAtomicOr";
+ case 242: return "OpAtomicXor";
+ case 243: return "Bad";
+ case 244: return "Bad";
+ case 245: return "OpPhi";
+ case 246: return "OpLoopMerge";
+ case 247: return "OpSelectionMerge";
+ case 248: return "OpLabel";
+ case 249: return "OpBranch";
+ case 250: return "OpBranchConditional";
+ case 251: return "OpSwitch";
+ case 252: return "OpKill";
+ case 253: return "OpReturn";
+ case 254: return "OpReturnValue";
+ case 255: return "OpUnreachable";
+ case 256: return "OpLifetimeStart";
+ case 257: return "OpLifetimeStop";
+ case 258: return "Bad";
+ case 259: return "OpGroupAsyncCopy";
+ case 260: return "OpGroupWaitEvents";
+ case 261: return "OpGroupAll";
+ case 262: return "OpGroupAny";
+ case 263: return "OpGroupBroadcast";
+ case 264: return "OpGroupIAdd";
+ case 265: return "OpGroupFAdd";
+ case 266: return "OpGroupFMin";
+ case 267: return "OpGroupUMin";
+ case 268: return "OpGroupSMin";
+ case 269: return "OpGroupFMax";
+ case 270: return "OpGroupUMax";
+ case 271: return "OpGroupSMax";
+ case 272: return "Bad";
+ case 273: return "Bad";
+ case 274: return "OpReadPipe";
+ case 275: return "OpWritePipe";
+ case 276: return "OpReservedReadPipe";
+ case 277: return "OpReservedWritePipe";
+ case 278: return "OpReserveReadPipePackets";
+ case 279: return "OpReserveWritePipePackets";
+ case 280: return "OpCommitReadPipe";
+ case 281: return "OpCommitWritePipe";
+ case 282: return "OpIsValidReserveId";
+ case 283: return "OpGetNumPipePackets";
+ case 284: return "OpGetMaxPipePackets";
+ case 285: return "OpGroupReserveReadPipePackets";
+ case 286: return "OpGroupReserveWritePipePackets";
+ case 287: return "OpGroupCommitReadPipe";
+ case 288: return "OpGroupCommitWritePipe";
+ case 289: return "Bad";
+ case 290: return "Bad";
+ case 291: return "OpEnqueueMarker";
+ case 292: return "OpEnqueueKernel";
+ case 293: return "OpGetKernelNDrangeSubGroupCount";
+ case 294: return "OpGetKernelNDrangeMaxSubGroupSize";
+ case 295: return "OpGetKernelWorkGroupSize";
+ case 296: return "OpGetKernelPreferredWorkGroupSizeMultiple";
+ case 297: return "OpRetainEvent";
+ case 298: return "OpReleaseEvent";
+ case 299: return "OpCreateUserEvent";
+ case 300: return "OpIsValidEvent";
+ case 301: return "OpSetUserEventStatus";
+ case 302: return "OpCaptureEventProfilingInfo";
+ case 303: return "OpGetDefaultQueue";
+ case 304: return "OpBuildNDRange";
+ case 305: return "OpImageSparseSampleImplicitLod";
+ case 306: return "OpImageSparseSampleExplicitLod";
+ case 307: return "OpImageSparseSampleDrefImplicitLod";
+ case 308: return "OpImageSparseSampleDrefExplicitLod";
+ case 309: return "OpImageSparseSampleProjImplicitLod";
+ case 310: return "OpImageSparseSampleProjExplicitLod";
+ case 311: return "OpImageSparseSampleProjDrefImplicitLod";
+ case 312: return "OpImageSparseSampleProjDrefExplicitLod";
+ case 313: return "OpImageSparseFetch";
+ case 314: return "OpImageSparseGather";
+ case 315: return "OpImageSparseDrefGather";
+ case 316: return "OpImageSparseTexelsResident";
+ case 317: return "OpNoLine";
+ case 318: return "OpAtomicFlagTestAndSet";
+ case 319: return "OpAtomicFlagClear";
+ case 320: return "OpImageSparseRead";
+
+ case OpModuleProcessed: return "OpModuleProcessed";
+ case OpDecorateId: return "OpDecorateId";
+
+ case 333: return "OpGroupNonUniformElect";
+ case 334: return "OpGroupNonUniformAll";
+ case 335: return "OpGroupNonUniformAny";
+ case 336: return "OpGroupNonUniformAllEqual";
+ case 337: return "OpGroupNonUniformBroadcast";
+ case 338: return "OpGroupNonUniformBroadcastFirst";
+ case 339: return "OpGroupNonUniformBallot";
+ case 340: return "OpGroupNonUniformInverseBallot";
+ case 341: return "OpGroupNonUniformBallotBitExtract";
+ case 342: return "OpGroupNonUniformBallotBitCount";
+ case 343: return "OpGroupNonUniformBallotFindLSB";
+ case 344: return "OpGroupNonUniformBallotFindMSB";
+ case 345: return "OpGroupNonUniformShuffle";
+ case 346: return "OpGroupNonUniformShuffleXor";
+ case 347: return "OpGroupNonUniformShuffleUp";
+ case 348: return "OpGroupNonUniformShuffleDown";
+ case 349: return "OpGroupNonUniformIAdd";
+ case 350: return "OpGroupNonUniformFAdd";
+ case 351: return "OpGroupNonUniformIMul";
+ case 352: return "OpGroupNonUniformFMul";
+ case 353: return "OpGroupNonUniformSMin";
+ case 354: return "OpGroupNonUniformUMin";
+ case 355: return "OpGroupNonUniformFMin";
+ case 356: return "OpGroupNonUniformSMax";
+ case 357: return "OpGroupNonUniformUMax";
+ case 358: return "OpGroupNonUniformFMax";
+ case 359: return "OpGroupNonUniformBitwiseAnd";
+ case 360: return "OpGroupNonUniformBitwiseOr";
+ case 361: return "OpGroupNonUniformBitwiseXor";
+ case 362: return "OpGroupNonUniformLogicalAnd";
+ case 363: return "OpGroupNonUniformLogicalOr";
+ case 364: return "OpGroupNonUniformLogicalXor";
+ case 365: return "OpGroupNonUniformQuadBroadcast";
+ case 366: return "OpGroupNonUniformQuadSwap";
+
+ case 4421: return "OpSubgroupBallotKHR";
+ case 4422: return "OpSubgroupFirstInvocationKHR";
+ case 4428: return "OpSubgroupAllKHR";
+ case 4429: return "OpSubgroupAnyKHR";
+ case 4430: return "OpSubgroupAllEqualKHR";
+ case 4432: return "OpSubgroupReadInvocationKHR";
+
+#ifdef AMD_EXTENSIONS
+ case 5000: return "OpGroupIAddNonUniformAMD";
+ case 5001: return "OpGroupFAddNonUniformAMD";
+ case 5002: return "OpGroupFMinNonUniformAMD";
+ case 5003: return "OpGroupUMinNonUniformAMD";
+ case 5004: return "OpGroupSMinNonUniformAMD";
+ case 5005: return "OpGroupFMaxNonUniformAMD";
+ case 5006: return "OpGroupUMaxNonUniformAMD";
+ case 5007: return "OpGroupSMaxNonUniformAMD";
+
+ case 5011: return "OpFragmentMaskFetchAMD";
+ case 5012: return "OpFragmentFetchAMD";
+#endif
+
+ case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
+ case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
+
+#ifdef NV_EXTENSIONS
+ case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
+ case OpReportIntersectionNV: return "OpReportIntersectionNV";
+ case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
+ case OpTerminateRayNV: return "OpTerminateRayNV";
+ case OpTraceNV: return "OpTraceNV";
+ case OpTypeAccelerationStructureNV: return "OpTypeAccelerationStructureNV";
+ case OpExecuteCallableNV: return "OpExecuteCallableNV";
+ case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
+ case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
+#endif
+
+ case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV";
+ case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV";
+ case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV";
+ case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV";
+ case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV";
+
+ default:
+ return "Bad";
+ }
+}
+
+// The set of objects that hold all the instruction/operand
+// parameterization information.
+InstructionParameters InstructionDesc[OpCodeMask + 1];
+OperandParameters ExecutionModeOperands[ExecutionModeCeiling];
+OperandParameters DecorationOperands[DecorationCeiling];
+
+EnumDefinition OperandClassParams[OperandCount];
+EnumParameters ExecutionModeParams[ExecutionModeCeiling];
+EnumParameters ImageOperandsParams[ImageOperandsCeiling];
+EnumParameters DecorationParams[DecorationCeiling];
+EnumParameters LoopControlParams[LoopControlCeiling];
+EnumParameters SelectionControlParams[SelectControlCeiling];
+EnumParameters FunctionControlParams[FunctionControlCeiling];
+EnumParameters MemoryAccessParams[MemoryAccessCeiling];
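+
+// These tables are indexed directly by enumerant value; once Parameterize()
+// has run, e.g. InstructionDesc[OpStore].operands lists the operand classes
+// a disassembler or spec generator prints for an OpStore.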
+
+// Set up all the parameterizing descriptions of the opcodes, operands, etc.
+void Parameterize()
+{
+ // only do this once.
+ static bool initialized = false;
+ if (initialized)
+ return;
+ initialized = true;
+
+ // Exceptions to having a result <id> and a resulting type <id>.
+ // (Everything is initialized to have both).
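+    // For instance, OpExtInstImport produces a result <id> but no result-type
+    // <id>, hence setResultAndType(true, false) below, while OpStore produces
+    // neither.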
+
+ InstructionDesc[OpNop].setResultAndType(false, false);
+ InstructionDesc[OpSource].setResultAndType(false, false);
+ InstructionDesc[OpSourceContinued].setResultAndType(false, false);
+ InstructionDesc[OpSourceExtension].setResultAndType(false, false);
+ InstructionDesc[OpExtension].setResultAndType(false, false);
+ InstructionDesc[OpExtInstImport].setResultAndType(true, false);
+ InstructionDesc[OpCapability].setResultAndType(false, false);
+ InstructionDesc[OpMemoryModel].setResultAndType(false, false);
+ InstructionDesc[OpEntryPoint].setResultAndType(false, false);
+ InstructionDesc[OpExecutionMode].setResultAndType(false, false);
+ InstructionDesc[OpTypeVoid].setResultAndType(true, false);
+ InstructionDesc[OpTypeBool].setResultAndType(true, false);
+ InstructionDesc[OpTypeInt].setResultAndType(true, false);
+ InstructionDesc[OpTypeFloat].setResultAndType(true, false);
+ InstructionDesc[OpTypeVector].setResultAndType(true, false);
+ InstructionDesc[OpTypeMatrix].setResultAndType(true, false);
+ InstructionDesc[OpTypeImage].setResultAndType(true, false);
+ InstructionDesc[OpTypeSampler].setResultAndType(true, false);
+ InstructionDesc[OpTypeSampledImage].setResultAndType(true, false);
+ InstructionDesc[OpTypeArray].setResultAndType(true, false);
+ InstructionDesc[OpTypeRuntimeArray].setResultAndType(true, false);
+ InstructionDesc[OpTypeStruct].setResultAndType(true, false);
+ InstructionDesc[OpTypeOpaque].setResultAndType(true, false);
+ InstructionDesc[OpTypePointer].setResultAndType(true, false);
+ InstructionDesc[OpTypeForwardPointer].setResultAndType(false, false);
+ InstructionDesc[OpTypeFunction].setResultAndType(true, false);
+ InstructionDesc[OpTypeEvent].setResultAndType(true, false);
+ InstructionDesc[OpTypeDeviceEvent].setResultAndType(true, false);
+ InstructionDesc[OpTypeReserveId].setResultAndType(true, false);
+ InstructionDesc[OpTypeQueue].setResultAndType(true, false);
+ InstructionDesc[OpTypePipe].setResultAndType(true, false);
+ InstructionDesc[OpFunctionEnd].setResultAndType(false, false);
+ InstructionDesc[OpStore].setResultAndType(false, false);
+ InstructionDesc[OpImageWrite].setResultAndType(false, false);
+ InstructionDesc[OpDecorationGroup].setResultAndType(true, false);
+ InstructionDesc[OpDecorate].setResultAndType(false, false);
+ InstructionDesc[OpDecorateId].setResultAndType(false, false);
+ InstructionDesc[OpDecorateStringGOOGLE].setResultAndType(false, false);
+ InstructionDesc[OpMemberDecorate].setResultAndType(false, false);
+ InstructionDesc[OpMemberDecorateStringGOOGLE].setResultAndType(false, false);
+ InstructionDesc[OpGroupDecorate].setResultAndType(false, false);
+ InstructionDesc[OpGroupMemberDecorate].setResultAndType(false, false);
+ InstructionDesc[OpName].setResultAndType(false, false);
+ InstructionDesc[OpMemberName].setResultAndType(false, false);
+ InstructionDesc[OpString].setResultAndType(true, false);
+ InstructionDesc[OpLine].setResultAndType(false, false);
+ InstructionDesc[OpNoLine].setResultAndType(false, false);
+ InstructionDesc[OpCopyMemory].setResultAndType(false, false);
+ InstructionDesc[OpCopyMemorySized].setResultAndType(false, false);
+ InstructionDesc[OpEmitVertex].setResultAndType(false, false);
+ InstructionDesc[OpEndPrimitive].setResultAndType(false, false);
+ InstructionDesc[OpEmitStreamVertex].setResultAndType(false, false);
+ InstructionDesc[OpEndStreamPrimitive].setResultAndType(false, false);
+ InstructionDesc[OpControlBarrier].setResultAndType(false, false);
+ InstructionDesc[OpMemoryBarrier].setResultAndType(false, false);
+ InstructionDesc[OpAtomicStore].setResultAndType(false, false);
+ InstructionDesc[OpLoopMerge].setResultAndType(false, false);
+ InstructionDesc[OpSelectionMerge].setResultAndType(false, false);
+ InstructionDesc[OpLabel].setResultAndType(true, false);
+ InstructionDesc[OpBranch].setResultAndType(false, false);
+ InstructionDesc[OpBranchConditional].setResultAndType(false, false);
+ InstructionDesc[OpSwitch].setResultAndType(false, false);
+ InstructionDesc[OpKill].setResultAndType(false, false);
+ InstructionDesc[OpReturn].setResultAndType(false, false);
+ InstructionDesc[OpReturnValue].setResultAndType(false, false);
+ InstructionDesc[OpUnreachable].setResultAndType(false, false);
+ InstructionDesc[OpLifetimeStart].setResultAndType(false, false);
+ InstructionDesc[OpLifetimeStop].setResultAndType(false, false);
+ InstructionDesc[OpCommitReadPipe].setResultAndType(false, false);
+ InstructionDesc[OpCommitWritePipe].setResultAndType(false, false);
+ InstructionDesc[OpGroupCommitWritePipe].setResultAndType(false, false);
+ InstructionDesc[OpGroupCommitReadPipe].setResultAndType(false, false);
+ InstructionDesc[OpCaptureEventProfilingInfo].setResultAndType(false, false);
+ InstructionDesc[OpSetUserEventStatus].setResultAndType(false, false);
+ InstructionDesc[OpRetainEvent].setResultAndType(false, false);
+ InstructionDesc[OpReleaseEvent].setResultAndType(false, false);
+ InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false);
+ InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false);
+ InstructionDesc[OpModuleProcessed].setResultAndType(false, false);
+ InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false);
+ InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false);
+
+ // Specific additional context-dependent operands
+
+ ExecutionModeOperands[ExecutionModeInvocations].push(OperandLiteralNumber, "'Number of <<Invocation,invocations>>'");
+
+ ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'x size'");
+ ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'y size'");
+ ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'z size'");
+
+ ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'x size'");
+ ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'y size'");
+ ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'z size'");
+
+ ExecutionModeOperands[ExecutionModeOutputVertices].push(OperandLiteralNumber, "'Vertex count'");
+ ExecutionModeOperands[ExecutionModeVecTypeHint].push(OperandLiteralNumber, "'Vector type'");
+
+ DecorationOperands[DecorationStream].push(OperandLiteralNumber, "'Stream Number'");
+ DecorationOperands[DecorationLocation].push(OperandLiteralNumber, "'Location'");
+ DecorationOperands[DecorationComponent].push(OperandLiteralNumber, "'Component'");
+ DecorationOperands[DecorationIndex].push(OperandLiteralNumber, "'Index'");
+ DecorationOperands[DecorationBinding].push(OperandLiteralNumber, "'Binding Point'");
+ DecorationOperands[DecorationDescriptorSet].push(OperandLiteralNumber, "'Descriptor Set'");
+ DecorationOperands[DecorationOffset].push(OperandLiteralNumber, "'Byte Offset'");
+ DecorationOperands[DecorationXfbBuffer].push(OperandLiteralNumber, "'XFB Buffer Number'");
+ DecorationOperands[DecorationXfbStride].push(OperandLiteralNumber, "'XFB Stride'");
+ DecorationOperands[DecorationArrayStride].push(OperandLiteralNumber, "'Array Stride'");
+ DecorationOperands[DecorationMatrixStride].push(OperandLiteralNumber, "'Matrix Stride'");
+ DecorationOperands[DecorationBuiltIn].push(OperandLiteralNumber, "See <<BuiltIn,*BuiltIn*>>");
+ DecorationOperands[DecorationFPRoundingMode].push(OperandFPRoundingMode, "'Floating-Point Rounding Mode'");
+ DecorationOperands[DecorationFPFastMathMode].push(OperandFPFastMath, "'Fast-Math Mode'");
+ DecorationOperands[DecorationLinkageAttributes].push(OperandLiteralString, "'Name'");
+ DecorationOperands[DecorationLinkageAttributes].push(OperandLinkageType, "'Linkage Type'");
+ DecorationOperands[DecorationFuncParamAttr].push(OperandFuncParamAttr, "'Function Parameter Attribute'");
+ DecorationOperands[DecorationSpecId].push(OperandLiteralNumber, "'Specialization Constant ID'");
+ DecorationOperands[DecorationInputAttachmentIndex].push(OperandLiteralNumber, "'Attachment Index'");
+ DecorationOperands[DecorationAlignment].push(OperandLiteralNumber, "'Alignment'");
+
+    OperandClassParams[OperandSource].set(0, SourceString, nullptr);
+ OperandClassParams[OperandExecutionModel].set(0, ExecutionModelString, nullptr);
+ OperandClassParams[OperandAddressing].set(0, AddressingString, nullptr);
+ OperandClassParams[OperandMemory].set(0, MemoryString, nullptr);
+ OperandClassParams[OperandExecutionMode].set(ExecutionModeCeiling, ExecutionModeString, ExecutionModeParams);
+ OperandClassParams[OperandExecutionMode].setOperands(ExecutionModeOperands);
+ OperandClassParams[OperandStorage].set(0, StorageClassString, nullptr);
+ OperandClassParams[OperandDimensionality].set(0, DimensionString, nullptr);
+ OperandClassParams[OperandSamplerAddressingMode].set(0, SamplerAddressingModeString, nullptr);
+ OperandClassParams[OperandSamplerFilterMode].set(0, SamplerFilterModeString, nullptr);
+ OperandClassParams[OperandSamplerImageFormat].set(0, ImageFormatString, nullptr);
+ OperandClassParams[OperandImageChannelOrder].set(0, ImageChannelOrderString, nullptr);
+ OperandClassParams[OperandImageChannelDataType].set(0, ImageChannelDataTypeString, nullptr);
+ OperandClassParams[OperandImageOperands].set(ImageOperandsCeiling, ImageOperandsString, ImageOperandsParams, true);
+ OperandClassParams[OperandFPFastMath].set(0, FPFastMathString, nullptr, true);
+ OperandClassParams[OperandFPRoundingMode].set(0, FPRoundingModeString, nullptr);
+ OperandClassParams[OperandLinkageType].set(0, LinkageTypeString, nullptr);
+ OperandClassParams[OperandFuncParamAttr].set(0, FuncParamAttrString, nullptr);
+ OperandClassParams[OperandAccessQualifier].set(0, AccessQualifierString, nullptr);
+ OperandClassParams[OperandDecoration].set(DecorationCeiling, DecorationString, DecorationParams);
+ OperandClassParams[OperandDecoration].setOperands(DecorationOperands);
+ OperandClassParams[OperandBuiltIn].set(0, BuiltInString, nullptr);
+ OperandClassParams[OperandSelect].set(SelectControlCeiling, SelectControlString, SelectionControlParams, true);
+ OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true);
+ OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true);
+ OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true);
+ OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true);
+ OperandClassParams[OperandScope].set(0, ScopeString, nullptr);
+ OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr);
+ OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr);
+ OperandClassParams[OperandKernelProfilingInfo].set(0, KernelProfilingInfoString, nullptr, true);
+ OperandClassParams[OperandCapability].set(0, CapabilityString, nullptr);
+    OperandClassParams[OperandOpcode].set(OpCodeMask + 1, OpcodeString, nullptr);
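+
+    // Each set() call above registers, for one operand class: the ceiling
+    // (0 where no per-enumerant parameter table is kept), the class's
+    // name-translation function, an optional EnumParameters table, and,
+    // via the trailing 'true', whether the class is a bit mask rather
+    // than a plain enum.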
+
+    // Set the name of each operator, an initial set of <id>-style operands, and the description.
+
+ InstructionDesc[OpSource].operands.push(OperandSource, "");
+ InstructionDesc[OpSource].operands.push(OperandLiteralNumber, "'Version'");
+ InstructionDesc[OpSource].operands.push(OperandId, "'File'", true);
+ InstructionDesc[OpSource].operands.push(OperandLiteralString, "'Source'", true);
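+    // (A trailing 'true' in push() marks the operand as optional, as with
+    // 'File' and 'Source' here.)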
+
+ InstructionDesc[OpSourceContinued].operands.push(OperandLiteralString, "'Continued Source'");
+
+ InstructionDesc[OpSourceExtension].operands.push(OperandLiteralString, "'Extension'");
+
+ InstructionDesc[OpName].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpName].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpMemberName].operands.push(OperandId, "'Type'");
+ InstructionDesc[OpMemberName].operands.push(OperandLiteralNumber, "'Member'");
+ InstructionDesc[OpMemberName].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpString].operands.push(OperandLiteralString, "'String'");
+
+ InstructionDesc[OpLine].operands.push(OperandId, "'File'");
+ InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Line'");
+ InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Column'");
+
+ InstructionDesc[OpExtension].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpExtInstImport].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpCapability].operands.push(OperandCapability, "'Capability'");
+
+ InstructionDesc[OpMemoryModel].operands.push(OperandAddressing, "");
+ InstructionDesc[OpMemoryModel].operands.push(OperandMemory, "");
+
+ InstructionDesc[OpEntryPoint].operands.push(OperandExecutionModel, "");
+ InstructionDesc[OpEntryPoint].operands.push(OperandId, "'Entry Point'");
+ InstructionDesc[OpEntryPoint].operands.push(OperandLiteralString, "'Name'");
+ InstructionDesc[OpEntryPoint].operands.push(OperandVariableIds, "'Interface'");
+
+ InstructionDesc[OpExecutionMode].operands.push(OperandId, "'Entry Point'");
+ InstructionDesc[OpExecutionMode].operands.push(OperandExecutionMode, "'Mode'");
+ InstructionDesc[OpExecutionMode].operands.push(OperandOptionalLiteral, "See <<Execution_Mode,Execution Mode>>");
+
+ InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Width'");
+ InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Signedness'");
+
+ InstructionDesc[OpTypeFloat].operands.push(OperandLiteralNumber, "'Width'");
+
+ InstructionDesc[OpTypeVector].operands.push(OperandId, "'Component Type'");
+ InstructionDesc[OpTypeVector].operands.push(OperandLiteralNumber, "'Component Count'");
+
+ InstructionDesc[OpTypeMatrix].operands.push(OperandId, "'Column Type'");
+ InstructionDesc[OpTypeMatrix].operands.push(OperandLiteralNumber, "'Column Count'");
+
+ InstructionDesc[OpTypeImage].operands.push(OperandId, "'Sampled Type'");
+ InstructionDesc[OpTypeImage].operands.push(OperandDimensionality, "");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Depth'");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Arrayed'");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'MS'");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Sampled'");
+ InstructionDesc[OpTypeImage].operands.push(OperandSamplerImageFormat, "");
+ InstructionDesc[OpTypeImage].operands.push(OperandAccessQualifier, "", true);
+
+ InstructionDesc[OpTypeSampledImage].operands.push(OperandId, "'Image Type'");
+
+ InstructionDesc[OpTypeArray].operands.push(OperandId, "'Element Type'");
+ InstructionDesc[OpTypeArray].operands.push(OperandId, "'Length'");
+
+ InstructionDesc[OpTypeRuntimeArray].operands.push(OperandId, "'Element Type'");
+
+ InstructionDesc[OpTypeStruct].operands.push(OperandVariableIds, "'Member 0 type', +\n'member 1 type', +\n...");
+
+ InstructionDesc[OpTypeOpaque].operands.push(OperandLiteralString, "The name of the opaque type.");
+
+ InstructionDesc[OpTypePointer].operands.push(OperandStorage, "");
+ InstructionDesc[OpTypePointer].operands.push(OperandId, "'Type'");
+
+ InstructionDesc[OpTypeForwardPointer].operands.push(OperandId, "'Pointer Type'");
+ InstructionDesc[OpTypeForwardPointer].operands.push(OperandStorage, "");
+
+ InstructionDesc[OpTypePipe].operands.push(OperandAccessQualifier, "'Qualifier'");
+
+ InstructionDesc[OpTypeFunction].operands.push(OperandId, "'Return Type'");
+ InstructionDesc[OpTypeFunction].operands.push(OperandVariableIds, "'Parameter 0 Type', +\n'Parameter 1 Type', +\n...");
+
+ InstructionDesc[OpConstant].operands.push(OperandVariableLiterals, "'Value'");
+
+ InstructionDesc[OpConstantComposite].operands.push(OperandVariableIds, "'Constituents'");
+
+ InstructionDesc[OpConstantSampler].operands.push(OperandSamplerAddressingMode, "");
+ InstructionDesc[OpConstantSampler].operands.push(OperandLiteralNumber, "'Param'");
+ InstructionDesc[OpConstantSampler].operands.push(OperandSamplerFilterMode, "");
+
+ InstructionDesc[OpSpecConstant].operands.push(OperandVariableLiterals, "'Value'");
+
+ InstructionDesc[OpSpecConstantComposite].operands.push(OperandVariableIds, "'Constituents'");
+
+ InstructionDesc[OpSpecConstantOp].operands.push(OperandLiteralNumber, "'Opcode'");
+ InstructionDesc[OpSpecConstantOp].operands.push(OperandVariableIds, "'Operands'");
+
+ InstructionDesc[OpVariable].operands.push(OperandStorage, "");
+ InstructionDesc[OpVariable].operands.push(OperandId, "'Initializer'", true);
+
+ InstructionDesc[OpFunction].operands.push(OperandFunction, "");
+ InstructionDesc[OpFunction].operands.push(OperandId, "'Function Type'");
+
+ InstructionDesc[OpFunctionCall].operands.push(OperandId, "'Function'");
+ InstructionDesc[OpFunctionCall].operands.push(OperandVariableIds, "'Argument 0', +\n'Argument 1', +\n...");
+
+ InstructionDesc[OpExtInst].operands.push(OperandId, "'Set'");
+ InstructionDesc[OpExtInst].operands.push(OperandLiteralNumber, "'Instruction'");
+ InstructionDesc[OpExtInst].operands.push(OperandVariableIds, "'Operand 1', +\n'Operand 2', +\n...");
+
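+ // Memory access instructions. A trailing 'true' passed to push() marks that operand optional;
+ // here that covers the memory-access mask and the extra literal/<id> parameters some masks carry.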
+ InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true);
+ InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpLoad].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpStore].operands.push(OperandId, "'Object'");
+ InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true);
+ InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpStore].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'");
+
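+ // Decoration instructions, including the SPV_GOOGLE_decorate_string forms.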
+ InstructionDesc[OpDecorate].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpDecorate].operands.push(OperandDecoration, "");
+ InstructionDesc[OpDecorate].operands.push(OperandVariableLiterals, "See <<Decoration,'Decoration'>>.");
+
+ InstructionDesc[OpDecorateId].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpDecorateId].operands.push(OperandDecoration, "");
+ InstructionDesc[OpDecorateId].operands.push(OperandVariableIds, "See <<Decoration,'Decoration'>>.");
+
+ InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandDecoration, "");
+ InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandLiteralString, "'Literal String'");
+
+ InstructionDesc[OpMemberDecorate].operands.push(OperandId, "'Structure Type'");
+ InstructionDesc[OpMemberDecorate].operands.push(OperandLiteralNumber, "'Member'");
+ InstructionDesc[OpMemberDecorate].operands.push(OperandDecoration, "");
+ InstructionDesc[OpMemberDecorate].operands.push(OperandVariableLiterals, "See <<Decoration,'Decoration'>>.");
+
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandId, "'Structure Type'");
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralNumber, "'Member'");
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandDecoration, "");
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralString, "'Literal String'");
+
+ InstructionDesc[OpGroupDecorate].operands.push(OperandId, "'Decoration Group'");
+ InstructionDesc[OpGroupDecorate].operands.push(OperandVariableIds, "'Targets'");
+
+ InstructionDesc[OpGroupMemberDecorate].operands.push(OperandId, "'Decoration Group'");
+ InstructionDesc[OpGroupMemberDecorate].operands.push(OperandVariableIdLiteral, "'Targets'");
+
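+ // Vector and composite manipulation instructions.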
+ InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Index'");
+
+ InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Component'");
+ InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Index'");
+
+ InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 1'");
+ InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 2'");
+ InstructionDesc[OpVectorShuffle].operands.push(OperandVariableLiterals, "'Components'");
+
+ InstructionDesc[OpCompositeConstruct].operands.push(OperandVariableIds, "'Constituents'");
+
+ InstructionDesc[OpCompositeExtract].operands.push(OperandId, "'Composite'");
+ InstructionDesc[OpCompositeExtract].operands.push(OperandVariableLiterals, "'Indexes'");
+
+ InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Object'");
+ InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Composite'");
+ InstructionDesc[OpCompositeInsert].operands.push(OperandVariableLiterals, "'Indexes'");
+
+ InstructionDesc[OpCopyObject].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Source'");
+ InstructionDesc[OpCopyMemory].operands.push(OperandMemoryAccess, "", true);
+
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Source'");
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Size'");
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandMemoryAccess, "", true);
+
+ InstructionDesc[OpSampledImage].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpSampledImage].operands.push(OperandId, "'Sampler'");
+
+ InstructionDesc[OpImage].operands.push(OperandId, "'Sampled Image'");
+
+ InstructionDesc[OpImageRead].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageRead].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageRead].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageRead].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageWrite].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageWrite].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageWrite].operands.push(OperandId, "'Texel'");
+ InstructionDesc[OpImageWrite].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageWrite].operands.push(OperandVariableIds, "", true);
+
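+ // Image sampling instructions share one shape: a sampled image and a coordinate,
+ // a D~ref~ for the depth-comparison forms, then optional image operands and their <id>s.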
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageFetch].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageFetch].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageFetch].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageFetch].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageGather].operands.push(OperandId, "'Component'");
+ InstructionDesc[OpImageGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageGather].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageDrefGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageDrefGather].operands.push(OperandVariableIds, "", true);
+
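+ // Sparse variants mirror the non-sparse image instructions; residency information
+ // comes back in the result rather than as an extra operand.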
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Component'");
+ InstructionDesc[OpImageSparseGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseGather].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseRead].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseRead].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseTexelsResident].operands.push(OperandId, "'Resident Code'");
+
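+ // Image query instructions.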
+ InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Level of Detail'");
+
+ InstructionDesc[OpImageQuerySize].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Coordinate'");
+
+ InstructionDesc[OpImageQueryLevels].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQuerySamples].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQueryFormat].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQueryOrder].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpInBoundsAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpInBoundsAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Element'");
+ InstructionDesc[OpPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Element'");
+ InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpSNegate].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpFNegate].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpNot].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpAny].operands.push(OperandId, "'Vector'");
+
+ InstructionDesc[OpAll].operands.push(OperandId, "'Vector'");
+
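+ // Conversion instructions take a single source operand; the destination format is carried by the result type.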
+ InstructionDesc[OpConvertFToU].operands.push(OperandId, "'Float Value'");
+
+ InstructionDesc[OpConvertFToS].operands.push(OperandId, "'Float Value'");
+
+ InstructionDesc[OpConvertSToF].operands.push(OperandId, "'Signed Value'");
+
+ InstructionDesc[OpConvertUToF].operands.push(OperandId, "'Unsigned Value'");
+
+ InstructionDesc[OpUConvert].operands.push(OperandId, "'Unsigned Value'");
+
+ InstructionDesc[OpSConvert].operands.push(OperandId, "'Signed Value'");
+
+ InstructionDesc[OpFConvert].operands.push(OperandId, "'Float Value'");
+
+ InstructionDesc[OpSatConvertSToU].operands.push(OperandId, "'Signed Value'");
+
+ InstructionDesc[OpSatConvertUToS].operands.push(OperandId, "'Unsigned Value'");
+
+ InstructionDesc[OpConvertPtrToU].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpConvertUToPtr].operands.push(OperandId, "'Integer Value'");
+
+ InstructionDesc[OpPtrCastToGeneric].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpGenericCastToPtr].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandStorage, "'Storage'");
+
+ InstructionDesc[OpGenericPtrMemSemantics].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpBitcast].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpQuantizeToF16].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'");
+
+ InstructionDesc[OpIsNan].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpIsInf].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpIsFinite].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpIsNormal].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpSignBitSet].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'x'");
+ InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'y'");
+
+ InstructionDesc[OpOrdered].operands.push(OperandId, "'x'");
+ InstructionDesc[OpOrdered].operands.push(OperandId, "'y'");
+
+ InstructionDesc[OpUnordered].operands.push(OperandId, "'x'");
+ InstructionDesc[OpUnordered].operands.push(OperandId, "'y'");
+
+ InstructionDesc[OpArrayLength].operands.push(OperandId, "'Structure'");
+ InstructionDesc[OpArrayLength].operands.push(OperandLiteralNumber, "'Array member'");
+
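+ // Binary arithmetic instructions uniformly take 'Operand 1' and 'Operand 2'.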
+ InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpISub].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpISub].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Scalar'");
+
+ InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Matrix'");
+ InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Scalar'");
+
+ InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Matrix'");
+
+ InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Matrix'");
+ InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Vector'");
+
+ InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'LeftMatrix'");
+ InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'RightMatrix'");
+
+ InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 1'");
+ InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 2'");
+
+ InstructionDesc[OpDot].operands.push(OperandId, "'Vector 1'");
+ InstructionDesc[OpDot].operands.push(OperandId, "'Vector 2'");
+
+ InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Shift'");
+
+ InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Shift'");
+
+ InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Shift'");
+
+ InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalNot].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Insert'");
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Offset'");
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Count'");
+
+ InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Offset'");
+ InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Count'");
+
+ InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Offset'");
+ InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Count'");
+
+ InstructionDesc[OpBitReverse].operands.push(OperandId, "'Base'");
+
+ InstructionDesc[OpBitCount].operands.push(OperandId, "'Base'");
+
+ InstructionDesc[OpSelect].operands.push(OperandId, "'Condition'");
+ InstructionDesc[OpSelect].operands.push(OperandId, "'Object 1'");
+ InstructionDesc[OpSelect].operands.push(OperandId, "'Object 2'");
+
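+ // Relational instructions, in unsigned (U), signed (S), ordered-float (FOrd),
+ // and unordered-float (FUnord) flavors.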
+ InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
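+ // Derivative instructions: a single operand 'P', in implicit, fine, and coarse variants.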
+ InstructionDesc[OpDPdx].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdy].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpFwidth].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdxFine].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdyFine].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpFwidthFine].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdxCoarse].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdyCoarse].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpFwidthCoarse].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpEmitStreamVertex].operands.push(OperandId, "'Stream'");
+
+ InstructionDesc[OpEndStreamPrimitive].operands.push(OperandId, "'Stream'");
+
+ InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Memory'");
+ InstructionDesc[OpControlBarrier].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpMemoryBarrier].operands.push(OperandScope, "'Memory'");
+ InstructionDesc[OpMemoryBarrier].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Sample'");
+
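+ // Atomic instructions: a pointer, a scope, and memory semantics; read-modify-write
+ // forms add a 'Value', and the compare-exchange forms add a 'Comparator' as well.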
+ InstructionDesc[OpAtomicLoad].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicLoad].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicLoad].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicStore].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicStore].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicExchange].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicExchange].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Equal'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Unequal'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Comparator'");
+
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Equal'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Unequal'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Comparator'");
+
+ InstructionDesc[OpAtomicIIncrement].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicIIncrement].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicIIncrement].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicIDecrement].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicIDecrement].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicIDecrement].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicISub].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicISub].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicUMin].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicUMin].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicUMax].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicUMax].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicSMin].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicSMin].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicSMax].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicSMax].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicAnd].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicAnd].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicOr].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicOr].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicXor].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicXor].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicFlagClear].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicFlagClear].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicFlagClear].operands.push(OperandMemorySemantics, "'Semantics'");
+
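+ // Structured control-flow instructions.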
+ InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Merge Block'");
+ InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Continue Target'");
+ InstructionDesc[OpLoopMerge].operands.push(OperandLoop, "");
+ InstructionDesc[OpLoopMerge].operands.push(OperandOptionalLiteral, "");
+
+ InstructionDesc[OpSelectionMerge].operands.push(OperandId, "'Merge Block'");
+ InstructionDesc[OpSelectionMerge].operands.push(OperandSelect, "");
+
+ InstructionDesc[OpBranch].operands.push(OperandId, "'Target Label'");
+
+ InstructionDesc[OpBranchConditional].operands.push(OperandId, "'Condition'");
+ InstructionDesc[OpBranchConditional].operands.push(OperandId, "'True Label'");
+ InstructionDesc[OpBranchConditional].operands.push(OperandId, "'False Label'");
+ InstructionDesc[OpBranchConditional].operands.push(OperandVariableLiterals, "'Branch weights'");
+
+ InstructionDesc[OpSwitch].operands.push(OperandId, "'Selector'");
+ InstructionDesc[OpSwitch].operands.push(OperandId, "'Default'");
+ InstructionDesc[OpSwitch].operands.push(OperandVariableLiteralId, "'Target'");
+
+ InstructionDesc[OpReturnValue].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpLifetimeStart].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpLifetimeStart].operands.push(OperandLiteralNumber, "'Size'");
+
+ InstructionDesc[OpLifetimeStop].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpLifetimeStop].operands.push(OperandLiteralNumber, "'Size'");
+
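+ // Work-group and subgroup collective instructions; every form leads with an 'Execution' scope.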
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Destination'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Source'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Num Elements'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Stride'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpGroupWaitEvents].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Num Events'");
+ InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Events List'");
+
+ InstructionDesc[OpGroupAll].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupAll].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpGroupAny].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupAny].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpGroupBroadcast].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'LocalId'");
+
+ InstructionDesc[OpGroupIAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupIAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupIAdd].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFAdd].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMin].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMin].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMin].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMax].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMax].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMax].operands.push(OperandId, "'X'");
+
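+ // Pipe instructions (Pipes capability).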
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Index'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Index'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpIsValidReserveId].operands.push(OperandId, "'Reserve Id'");
+
+ InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
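+ // Device-side kernel enqueue instructions (DeviceEnqueue capability).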
+ InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkSize'");
+ InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'LocalWorkSize'");
+ InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkOffset'");
+
+ InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Event'");
+ InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Profiling Info'");
+ InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Event'");
+ InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Status'");
+
+ InstructionDesc[OpIsValidEvent].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpRetainEvent].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpReleaseEvent].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'ND Range'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'ND Range'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Queue'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Flags'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'ND Range'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Num Events'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Wait Events'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Ret Event'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Align'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandVariableIds, "'Local Size'");
+
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Queue'");
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Num Events'");
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Wait Events'");
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Ret Event'");
+
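+ // SPIR-V 1.3 non-uniform subgroup instructions; the reduction forms take an
+ // optional 'ClusterSize', used only with the ClusteredReduce group operation.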
+ InstructionDesc[OpGroupNonUniformElect].operands.push(OperandScope, "'Execution'");
+
+ InstructionDesc[OpGroupNonUniformAll].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformAll].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformAny].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformAny].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "ID");
+
+ InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "Bit");
+
+ InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "'Id'");
+
+ InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "Mask");
+
+ InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "Offset");
+
+ InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "Offset");
+
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "'Id'");
+
+ InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandLiteralNumber, "'Direction'");
+
+ InstructionDesc[OpSubgroupBallotKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupFirstInvocationKHR].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupAllKHR].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpSubgroupAllKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Index'");
+
+ InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'");
+
+#ifdef AMD_EXTENSIONS
+ InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandId, "X");
+
+ InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Coordinate'");
+
+ InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
+#endif
+
+#ifdef NV_EXTENSIONS
+ InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X");
+
+ InstructionDesc[OpTypeAccelerationStructureNV].setResultAndType(true, false);
+
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'NV Acceleration Structure'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Flags'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Cull Mask'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Offset'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Stride'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Miss Index'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Origin'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMin'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Direction'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMax'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Payload'");
+ InstructionDesc[OpTraceNV].setResultAndType(false, false);
+
+ InstructionDesc[OpReportIntersectionNV].operands.push(OperandId, "'Hit Parameter'");
+ InstructionDesc[OpReportIntersectionNV].operands.push(OperandId, "'Hit Kind'");
+
+ InstructionDesc[OpIgnoreIntersectionNV].setResultAndType(false, false);
+
+ InstructionDesc[OpTerminateRayNV].setResultAndType(false, false);
+
+ InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "SBT Record Index");
+ InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "CallableData ID");
+ InstructionDesc[OpExecuteCallableNV].setResultAndType(false, false);
+
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Granularity'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coarse'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'");
+ InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'");
+#endif
+
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'");
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'");
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Rows'");
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Columns'");
+
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Stride'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Column Major'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandMemoryAccess, "'Memory Access'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Object'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Stride'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Column Major'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandMemoryAccess, "'Memory Access'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'A'");
+ InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'B'");
+ InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'");
+
+ InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'");
+}
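+
+// A hypothetical consumer of the tables built above (illustration only;
+// the real consumers are the disassembler and documentation printers in
+// this module): walk an opcode's registered operands and print their
+// descriptions.
+//
+//   void printOperandSummary(Op opcode) {
+//       const OperandParameters& params = InstructionDesc[opcode].operands;
+//       for (int op = 0; op < params.getNum(); ++op)
+//           printf("%s%s\n", params.getDesc(op),
+//                  params.isOptional(op) ? " (optional)" : "");
+//   }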
+
+}; // end spv namespace
diff --git a/src/3rdparty/glslang/SPIRV/doc.h b/src/3rdparty/glslang/SPIRV/doc.h
new file mode 100644
index 0000000..293256a
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/doc.h
@@ -0,0 +1,258 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Parameterize the SPIR-V enumerants.
+//
+
+#pragma once
+
+#include "spirv.hpp"
+
+#include <vector>
+
+namespace spv {
+
+// Fill in all the parameters
+void Parameterize();
+
+// Return the English names of all the enums.
+const char* SourceString(int);
+const char* AddressingString(int);
+const char* MemoryString(int);
+const char* ExecutionModelString(int);
+const char* ExecutionModeString(int);
+const char* StorageClassString(int);
+const char* DecorationString(int);
+const char* BuiltInString(int);
+const char* DimensionString(int);
+const char* SelectControlString(int);
+const char* LoopControlString(int);
+const char* FunctionControlString(int);
+const char* SamplerAddressingModeString(int);
+const char* SamplerFilterModeString(int);
+const char* ImageFormatString(int);
+const char* ImageChannelOrderString(int);
+const char* ImageChannelTypeString(int);
+const char* ImageChannelDataTypeString(int type);
+const char* ImageOperandsString(int format);
+const char* ImageOperands(int);
+const char* FPFastMathString(int);
+const char* FPRoundingModeString(int);
+const char* LinkageTypeString(int);
+const char* FuncParamAttrString(int);
+const char* AccessQualifierString(int);
+const char* MemorySemanticsString(int);
+const char* MemoryAccessString(int);
+const char* ExecutionScopeString(int);
+const char* GroupOperationString(int);
+const char* KernelEnqueueFlagsString(int);
+const char* KernelProfilingInfoString(int);
+const char* CapabilityString(int);
+const char* OpcodeString(int);
+const char* ScopeString(int mem);
+
+// For grouping opcodes into subsections
+enum OpcodeClass {
+ OpClassMisc,
+ OpClassDebug,
+ OpClassAnnotate,
+ OpClassExtension,
+ OpClassMode,
+ OpClassType,
+ OpClassConstant,
+ OpClassMemory,
+ OpClassFunction,
+ OpClassImage,
+ OpClassConvert,
+ OpClassComposite,
+ OpClassArithmetic,
+ OpClassBit,
+ OpClassRelationalLogical,
+ OpClassDerivative,
+ OpClassFlowControl,
+ OpClassAtomic,
+ OpClassPrimitive,
+ OpClassBarrier,
+ OpClassGroup,
+ OpClassDeviceSideEnqueue,
+ OpClassPipe,
+
+ OpClassCount,
+ OpClassMissing // all instructions start out as missing
+};
+
+// For parameterizing operands.
+enum OperandClass {
+ OperandNone,
+ OperandId,
+ OperandVariableIds,
+ OperandOptionalLiteral,
+ OperandOptionalLiteralString,
+ OperandVariableLiterals,
+ OperandVariableIdLiteral,
+ OperandVariableLiteralId,
+ OperandLiteralNumber,
+ OperandLiteralString,
+ OperandSource,
+ OperandExecutionModel,
+ OperandAddressing,
+ OperandMemory,
+ OperandExecutionMode,
+ OperandStorage,
+ OperandDimensionality,
+ OperandSamplerAddressingMode,
+ OperandSamplerFilterMode,
+ OperandSamplerImageFormat,
+ OperandImageChannelOrder,
+ OperandImageChannelDataType,
+ OperandImageOperands,
+ OperandFPFastMath,
+ OperandFPRoundingMode,
+ OperandLinkageType,
+ OperandAccessQualifier,
+ OperandFuncParamAttr,
+ OperandDecoration,
+ OperandBuiltIn,
+ OperandSelect,
+ OperandLoop,
+ OperandFunction,
+ OperandMemorySemantics,
+ OperandMemoryAccess,
+ OperandScope,
+ OperandGroupOperation,
+ OperandKernelEnqueueFlags,
+ OperandKernelProfilingInfo,
+ OperandCapability,
+
+ OperandOpcode,
+
+ OperandCount
+};
+
+// Any specific enum can have a set of capabilities that allow it:
+typedef std::vector<Capability> EnumCaps;
+
+// Parameterize a set of operands with their OperandClass(es) and descriptions.
+class OperandParameters {
+public:
+ OperandParameters() { }
+ void push(OperandClass oc, const char* d, bool opt = false)
+ {
+ opClass.push_back(oc);
+ desc.push_back(d);
+ optional.push_back(opt);
+ }
+ void setOptional();
+ OperandClass getClass(int op) const { return opClass[op]; }
+ const char* getDesc(int op) const { return desc[op]; }
+ bool isOptional(int op) const { return optional[op]; }
+ int getNum() const { return (int)opClass.size(); }
+
+protected:
+ std::vector<OperandClass> opClass;
+ std::vector<const char*> desc;
+ std::vector<bool> optional;
+};
+
+// Parameterize an enumerant
+class EnumParameters {
+public:
+ EnumParameters() : desc(0) { }
+ const char* desc;
+};
+
+// Parameterize a set of enumerants that form an enum
+class EnumDefinition : public EnumParameters {
+public:
+ EnumDefinition() :
+ ceiling(0), bitmask(false), getName(0), enumParams(0), operandParams(0) { }
+ void set(int ceil, const char* (*name)(int), EnumParameters* ep, bool mask = false)
+ {
+ ceiling = ceil;
+ getName = name;
+ bitmask = mask;
+ enumParams = ep;
+ }
+ void setOperands(OperandParameters* op) { operandParams = op; }
+ int ceiling; // ceiling of enumerants
+ bool bitmask; // true if these enumerants combine into a bitmask
+ const char* (*getName)(int); // a function that returns the name for each enumerant value (or shift)
+ EnumParameters* enumParams; // parameters for each individual enumerant
+ OperandParameters* operandParams; // sets of operands
+};
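+
+// A hypothetical registration sketch (illustration only; the real tables
+// are filled in by Parameterize() in doc.cpp): an enum is described by its
+// ceiling, a name callback such as DecorationString above, and optional
+// per-enumerant parameters. NumDecorations below is a made-up ceiling.
+//
+//   EnumParameters decorationParams[NumDecorations];
+//   OperandClassParams[OperandDecoration].set(NumDecorations,
+//                                             DecorationString,
+//                                             decorationParams);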
+
+// Parameterize an instruction's logical format, including its known set of operands,
+// per OperandParameters above.
+class InstructionParameters {
+public:
+ InstructionParameters() :
+ opDesc("TBD"),
+ opClass(OpClassMissing),
+ typePresent(true), // most normal, only exceptions have to be spelled out
+ resultPresent(true) // most normal, only exceptions have to be spelled out
+ { }
+
+ void setResultAndType(bool r, bool t)
+ {
+ resultPresent = r;
+ typePresent = t;
+ }
+
+ bool hasResult() const { return resultPresent != 0; }
+ bool hasType() const { return typePresent != 0; }
+
+ const char* opDesc;
+ OpcodeClass opClass;
+ OperandParameters operands;
+
+protected:
+ unsigned int typePresent : 1;
+ unsigned int resultPresent : 1;
+};
+
+// The set of objects that hold all the instruction/operand
+// parameterization information.
+extern InstructionParameters InstructionDesc[];
+
+// These hold definitions of the enumerants used for operands
+extern EnumDefinition OperandClassParams[];
+
+const char* GetOperandDesc(OperandClass operand);
+void PrintImmediateRow(int imm, const char* name, const EnumParameters* enumParams, bool caps, bool hex = false);
+
+void PrintOperands(const OperandParameters& operands, int reservedOperands);
+
+} // end namespace spv
diff --git a/src/3rdparty/glslang/SPIRV/hex_float.h b/src/3rdparty/glslang/SPIRV/hex_float.h
new file mode 100644
index 0000000..905b21a
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/hex_float.h
@@ -0,0 +1,1078 @@
+// Copyright (c) 2015-2016 The Khronos Group Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_UTIL_HEX_FLOAT_H_
+#define LIBSPIRV_UTIL_HEX_FLOAT_H_
+
+#include <cassert>
+#include <cctype>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <sstream>
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+namespace std {
+inline bool isnan(double f)
+{
+ return ::_isnan(f) != 0;
+}
+inline bool isinf(double f)
+{
+ return ::_finite(f) == 0;
+}
+}
+#endif
+
+#include "bitutils.h"
+
+namespace spvutils {
+
+class Float16 {
+ public:
+ Float16(uint16_t v) : val(v) {}
+ Float16() {}
+ static bool isNan(const Float16& val) {
+ return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) != 0);
+ }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(const Float16& val) {
+ return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) == 0);
+ }
+ Float16(const Float16& other) { val = other.val; }
+ uint16_t get_value() const { return val; }
+
+ // Returns the maximum normal value.
+ static Float16 max() { return Float16(0x7bff); }
+ // Returns the lowest normal value.
+ static Float16 lowest() { return Float16(0xfbff); }
+
+ private:
+ uint16_t val;
+};
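+
+// A small bit-pattern sketch (illustrative): in IEEE half precision the
+// exponent field is 0x7C00; an all-ones exponent with a non-zero fraction
+// is a NaN, while a zero fraction is an infinity.
+//
+//   Float16 nan(0x7C01), inf(0x7C00);
+//   // Float16::isNan(nan) == true,  Float16::isInfinity(nan) == false
+//   // Float16::isNan(inf) == false, Float16::isInfinity(inf) == true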
+
+// To specialize this type, you must override uint_type to define
+// an unsigned integer that can fit your floating point type.
+// You must also add an isNan function that returns true if
+// a value is NaN.
+template <typename T>
+struct FloatProxyTraits {
+ typedef void uint_type;
+};
+
+template <>
+struct FloatProxyTraits<float> {
+ typedef uint32_t uint_type;
+ static bool isNan(float f) { return std::isnan(f); }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(float f) { return std::isinf(f); }
+ // Returns the maximum normal value.
+ static float max() { return std::numeric_limits<float>::max(); }
+ // Returns the lowest normal value.
+ static float lowest() { return std::numeric_limits<float>::lowest(); }
+};
+
+template <>
+struct FloatProxyTraits<double> {
+ typedef uint64_t uint_type;
+ static bool isNan(double f) { return std::isnan(f); }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(double f) { return std::isinf(f); }
+ // Returns the maximum normal value.
+ static double max() { return std::numeric_limits<double>::max(); }
+ // Returns the lowest normal value.
+ static double lowest() { return std::numeric_limits<double>::lowest(); }
+};
+
+template <>
+struct FloatProxyTraits<Float16> {
+ typedef uint16_t uint_type;
+ static bool isNan(Float16 f) { return Float16::isNan(f); }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(Float16 f) { return Float16::isInfinity(f); }
+ // Returns the maximum normal value.
+ static Float16 max() { return Float16::max(); }
+ // Returns the lowest normal value.
+ static Float16 lowest() { return Float16::lowest(); }
+};
+
+// Since copying a floating point number (especially if it is NaN)
+// does not guarantee that bits are preserved, this class lets us
+// store the type and use it as a float when necessary.
+template <typename T>
+class FloatProxy {
+ public:
+ typedef typename FloatProxyTraits<T>::uint_type uint_type;
+
+ // Since this is to act similar to the normal floats,
+ // do not initialize the data by default.
+ FloatProxy() {}
+
+ // Intentionally non-explicit. This is a proxy type so
+ // implicit conversions allow us to use it more transparently.
+ FloatProxy(T val) { data_ = BitwiseCast<uint_type>(val); }
+
+ // Intentionally non-explicit. This is a proxy type so
+ // implicit conversions allow us to use it more transparently.
+ FloatProxy(uint_type val) { data_ = val; }
+
+ // This is helpful to have and is guaranteed not to stomp bits.
+ FloatProxy<T> operator-() const {
+ return static_cast<uint_type>(data_ ^
+ (uint_type(0x1) << (sizeof(T) * 8 - 1)));
+ }
+
+ // Returns the data as a floating point value.
+ T getAsFloat() const { return BitwiseCast<T>(data_); }
+
+ // Returns the raw data.
+ uint_type data() const { return data_; }
+
+ // Returns true if the value represents any type of NaN.
+ bool isNan() { return FloatProxyTraits<T>::isNan(getAsFloat()); }
+ // Returns true if the value represents any type of infinity.
+ bool isInfinity() { return FloatProxyTraits<T>::isInfinity(getAsFloat()); }
+
+ // Returns the maximum normal value.
+ static FloatProxy<T> max() {
+ return FloatProxy<T>(FloatProxyTraits<T>::max());
+ }
+ // Returns the lowest normal value.
+ static FloatProxy<T> lowest() {
+ return FloatProxy<T>(FloatProxyTraits<T>::lowest());
+ }
+
+ private:
+ uint_type data_;
+};
+
+template <typename T>
+bool operator==(const FloatProxy<T>& first, const FloatProxy<T>& second) {
+ return first.data() == second.data();
+}
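+
+// A small sketch of why the proxy matters (illustrative): a NaN never
+// compares equal to itself as a float, but its proxy compares by bit
+// pattern, so NaN payloads survive storage and comparison.
+//
+//   FloatProxy<float> a(0x7fc00001u), b(0x7fc00001u);
+//   // a == b holds (same bits), and a.isNan() is true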
+
+// Reads a FloatProxy value as a normal float from a stream.
+template <typename T>
+std::istream& operator>>(std::istream& is, FloatProxy<T>& value) {
+ T float_val;
+ is >> float_val;
+ value = FloatProxy<T>(float_val);
+ return is;
+}
+
+// These are example traits. They are not meant to be used in practice, but
+// serve as the default for any non-specialized type.
+template <typename T>
+struct HexFloatTraits {
+ // Integer type that can store this hex-float.
+ typedef void uint_type;
+ // Signed integer type that can store this hex-float.
+ typedef void int_type;
+ // The numerical type that this HexFloat represents.
+ typedef void underlying_type;
+ // The type needed to construct the underlying type.
+ typedef void native_type;
+ // The number of bits that are actually relevant in the uint_type.
+ // This allows us to deal with, for example, 24-bit values in a 32-bit
+ // integer.
+ static const uint32_t num_used_bits = 0;
+ // Number of bits that represent the exponent.
+ static const uint32_t num_exponent_bits = 0;
+ // Number of bits that represent the fractional part.
+ static const uint32_t num_fraction_bits = 0;
+ // The bias of the exponent. (How much we need to subtract from the stored
+ // value to get the correct value.)
+ static const uint32_t exponent_bias = 0;
+};
+
+// Traits for IEEE float.
+// 1 sign bit, 8 exponent bits, 23 fractional bits.
+template <>
+struct HexFloatTraits<FloatProxy<float>> {
+ typedef uint32_t uint_type;
+ typedef int32_t int_type;
+ typedef FloatProxy<float> underlying_type;
+ typedef float native_type;
+ static const uint_type num_used_bits = 32;
+ static const uint_type num_exponent_bits = 8;
+ static const uint_type num_fraction_bits = 23;
+ static const uint_type exponent_bias = 127;
+};
+
+// Traits for IEEE double.
+// 1 sign bit, 11 exponent bits, 52 fractional bits.
+template <>
+struct HexFloatTraits<FloatProxy<double>> {
+ typedef uint64_t uint_type;
+ typedef int64_t int_type;
+ typedef FloatProxy<double> underlying_type;
+ typedef double native_type;
+ static const uint_type num_used_bits = 64;
+ static const uint_type num_exponent_bits = 11;
+ static const uint_type num_fraction_bits = 52;
+ static const uint_type exponent_bias = 1023;
+};
+
+// Traits for IEEE half.
+// 1 sign bit, 5 exponent bits, 10 fractional bits.
+template <>
+struct HexFloatTraits<FloatProxy<Float16>> {
+ typedef uint16_t uint_type;
+ typedef int16_t int_type;
+ typedef uint16_t underlying_type;
+ typedef uint16_t native_type;
+ static const uint_type num_used_bits = 16;
+ static const uint_type num_exponent_bits = 5;
+ static const uint_type num_fraction_bits = 10;
+ static const uint_type exponent_bias = 15;
+};
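+
+// As a quick check of the half-precision numbers above (added comment):
+// 1 sign bit + 5 exponent bits + 10 fraction bits account for all 16 used
+// bits, and the bias of 15 gives normal numbers unbiased exponents in
+// [-14, +15]; the all-ones exponent is reserved for Inf/NaN.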
+
+enum round_direction {
+ kRoundToZero,
+ kRoundToNearestEven,
+ kRoundToPositiveInfinity,
+ kRoundToNegativeInfinity
+};
+
+// Template class that houses a floating point number.
+// It exposes a number of constants based on the provided traits to
+// assist in interpreting the bits of the value.
+template <typename T, typename Traits = HexFloatTraits<T>>
+class HexFloat {
+ public:
+ typedef typename Traits::uint_type uint_type;
+ typedef typename Traits::int_type int_type;
+ typedef typename Traits::underlying_type underlying_type;
+ typedef typename Traits::native_type native_type;
+
+ explicit HexFloat(T f) : value_(f) {}
+
+ T value() const { return value_; }
+ void set_value(T f) { value_ = f; }
+
+ // These are all written like this because it is convenient to have
+ // compile-time constants for all of these values.
+
+ // Pass-through values to save typing.
+ static const uint32_t num_used_bits = Traits::num_used_bits;
+ static const uint32_t exponent_bias = Traits::exponent_bias;
+ static const uint32_t num_exponent_bits = Traits::num_exponent_bits;
+ static const uint32_t num_fraction_bits = Traits::num_fraction_bits;
+
+ // Number of bits to shift left to set the highest relevant bit.
+ static const uint32_t top_bit_left_shift = num_used_bits - 1;
+ // How many nibbles (hex characters) the fractional part takes up.
+ static const uint32_t fraction_nibbles = (num_fraction_bits + 3) / 4;
+ // If the fractional part does not fit evenly into a hex character (4-bits)
+ // then we have to left-shift to get rid of leading 0s. This is the amount
+ // we have to shift (might be 0).
+ static const uint32_t num_overflow_bits =
+ fraction_nibbles * 4 - num_fraction_bits;
+
+ // The representation of the fraction, not the actual bits. This
+ // includes the leading bit that is usually implicit.
+ static const uint_type fraction_represent_mask =
+ spvutils::SetBits<uint_type, 0,
+ num_fraction_bits + num_overflow_bits>::get;
+
+ // The topmost bit in the nibble-aligned fraction.
+ static const uint_type fraction_top_bit =
+ uint_type(1) << (num_fraction_bits + num_overflow_bits - 1);
+
+ // The least significant bit in the exponent, which is also the bit
+ // immediately to the left of the significand.
+ static const uint_type first_exponent_bit = uint_type(1)
+ << (num_fraction_bits);
+
+ // The mask for the encoded fraction. It does not include the
+ // implicit bit.
+ static const uint_type fraction_encode_mask =
+ spvutils::SetBits<uint_type, 0, num_fraction_bits>::get;
+
+ // The bit that is used as a sign.
+ static const uint_type sign_mask = uint_type(1) << top_bit_left_shift;
+
+ // The bits that represent the exponent.
+ static const uint_type exponent_mask =
+ spvutils::SetBits<uint_type, num_fraction_bits, num_exponent_bits>::get;
+
+ // How far left the exponent is shifted.
+ static const uint32_t exponent_left_shift = num_fraction_bits;
+
+ // How far from the right edge the fraction is shifted.
+ static const uint32_t fraction_right_shift =
+ static_cast<uint32_t>(sizeof(uint_type) * 8) - num_fraction_bits;
+
+ // The maximum representable unbiased exponent.
+ static const int_type max_exponent =
+ (exponent_mask >> num_fraction_bits) - exponent_bias;
+ // The minimum representable exponent for normalized numbers.
+ static const int_type min_exponent = -static_cast<int_type>(exponent_bias);
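+
+ // Worked values for IEEE float (illustrative): with 8 exponent and 23
+ // fraction bits, sign_mask == 0x80000000, exponent_mask == 0x7F800000,
+ // fraction_encode_mask == 0x007FFFFF, fraction_nibbles == 6,
+ // num_overflow_bits == 1, max_exponent == +128 and min_exponent == -127.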
+
+ // Returns the bits associated with the value.
+ uint_type getBits() const { return spvutils::BitwiseCast<uint_type>(value_); }
+
+ // Returns the bits associated with the value, without the leading sign bit.
+ uint_type getUnsignedBits() const {
+ return static_cast<uint_type>(spvutils::BitwiseCast<uint_type>(value_) &
+ ~sign_mask);
+ }
+
+ // Returns the bits associated with the exponent, shifted to start at the
+ // lsb of the type.
+ const uint_type getExponentBits() const {
+ return static_cast<uint_type>((getBits() & exponent_mask) >>
+ num_fraction_bits);
+ }
+
+ // Returns the exponent in unbiased form. This is the exponent in the
+ // human-friendly form.
+ const int_type getUnbiasedExponent() const {
+ return static_cast<int_type>(getExponentBits() - exponent_bias);
+ }
+
+ // Returns just the significand bits from the value.
+ const uint_type getSignificandBits() const {
+ return getBits() & fraction_encode_mask;
+ }
+
+ // If the number was normalized, returns the unbiased exponent.
+ // If the number was denormal, normalize the exponent first.
+ const int_type getUnbiasedNormalizedExponent() const {
+ if ((getBits() & ~sign_mask) == 0) { // special case if everything is 0
+ return 0;
+ }
+ int_type exp = getUnbiasedExponent();
+ if (exp == min_exponent) { // We are in denorm land.
+ uint_type significand_bits = getSignificandBits();
+ while ((significand_bits & (first_exponent_bit >> 1)) == 0) {
+ significand_bits = static_cast<uint_type>(significand_bits << 1);
+ exp = static_cast<int_type>(exp - 1);
+ }
+ significand_bits &= fraction_encode_mask;
+ }
+ return exp;
+ }
+
+ // Returns the significand after it has been normalized.
+ const uint_type getNormalizedSignificand() const {
+ int_type unbiased_exponent = getUnbiasedNormalizedExponent();
+ uint_type significand = getSignificandBits();
+ for (int_type i = unbiased_exponent; i <= min_exponent; ++i) {
+ significand = static_cast<uint_type>(significand << 1);
+ }
+ significand &= fraction_encode_mask;
+ return significand;
+ }
+
+ // Returns true if this number represents a negative value.
+ bool isNegative() const { return (getBits() & sign_mask) != 0; }
+
+ // Sets this HexFloat from the individual components.
+ // Note this assumes EVERY significand is normalized, and has an implicit
+ // leading one. This means that the only way this method can set 0
+ // is if you set a number so denormalized that it underflows.
+ // Do not use this method with raw bits extracted from a subnormal number,
+ // since subnormals do not have an implicit leading 1 in the significand.
+ // The significand is also expected to be in the least significant
+ // num_fraction_bits of the uint_type.
+ // The exponent is expected to be unbiased, meaning an exponent of
+ // 0 actually means 0.
+ // If round_denorm_up is set, then a non-0 number that would underflow
+ // is rounded up to the smallest denorm instead of flushing to zero.
+ void setFromSignUnbiasedExponentAndNormalizedSignificand(
+ bool negative, int_type exponent, uint_type significand,
+ bool round_denorm_up) {
+ bool significand_is_zero = significand == 0;
+
+ if (exponent <= min_exponent) {
+ // If this was denormalized, then we have to shift the bit on, meaning
+ // the significand is not zero.
+ significand_is_zero = false;
+ significand |= first_exponent_bit;
+ significand = static_cast<uint_type>(significand >> 1);
+ }
+
+ while (exponent < min_exponent) {
+ significand = static_cast<uint_type>(significand >> 1);
+ ++exponent;
+ }
+
+ if (exponent == min_exponent) {
+ if (significand == 0 && !significand_is_zero && round_denorm_up) {
+ significand = static_cast<uint_type>(0x1);
+ }
+ }
+
+ uint_type new_value = 0;
+ if (negative) {
+ new_value = static_cast<uint_type>(new_value | sign_mask);
+ }
+ exponent = static_cast<int_type>(exponent + exponent_bias);
+ assert(exponent >= 0);
+
+ // put it all together
+ exponent = static_cast<uint_type>((exponent << exponent_left_shift) &
+ exponent_mask);
+ significand = static_cast<uint_type>(significand & fraction_encode_mask);
+ new_value = static_cast<uint_type>(new_value | (exponent | significand));
+ value_ = BitwiseCast<T>(new_value);
+ }
+
+ // Increments the significand of this number by the given amount.
+ // If this would spill the significand into the implicit bit,
+ // carry is set to true and the significand is shifted to fit into
+ // the correct location, otherwise carry is set to false.
+ // All significands and to_increment are assumed to be within the bounds
+ // for a valid significand.
+ static uint_type incrementSignificand(uint_type significand,
+ uint_type to_increment, bool* carry) {
+ significand = static_cast<uint_type>(significand + to_increment);
+ *carry = false;
+ if (significand & first_exponent_bit) {
+ *carry = true;
+ // The implicit 1-bit will have carried, so we should zero-out the
+ // top bit and shift back.
+ significand = static_cast<uint_type>(significand & ~first_exponent_bit);
+ significand = static_cast<uint_type>(significand >> 1);
+ }
+ return significand;
+ }
+
+ // These exist because MSVC throws warnings on negative right-shifts
+ // even if they are not going to be executed. Eg:
+ // constant_number < 0? 0: constant_number
+ // These convert the negative left-shifts into right shifts.
+
+ template <typename int_type>
+ uint_type negatable_left_shift(int_type N, uint_type val)
+ {
+ if(N >= 0)
+ return val << N;
+
+ return val >> -N;
+ }
+
+ template <typename int_type>
+ uint_type negatable_right_shift(int_type N, uint_type val)
+ {
+ if(N >= 0)
+ return val >> N;
+
+ return val << -N;
+ }
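+
+ // For example (illustrative): negatable_left_shift(-2, 8u) evaluates the
+ // negative shift as a right shift and yields 2, while
+ // negatable_left_shift(2, 8u) yields 32.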
+
+ // Returns the significand, rounded to fit in a significand in
+ // other_T. This is shifted so that the most significant
+ // bit of the rounded number lines up with the most significant bit
+ // of the returned significand.
+ template <typename other_T>
+ typename other_T::uint_type getRoundedNormalizedSignificand(
+ round_direction dir, bool* carry_bit) {
+ typedef typename other_T::uint_type other_uint_type;
+ static const int_type num_throwaway_bits =
+ static_cast<int_type>(num_fraction_bits) -
+ static_cast<int_type>(other_T::num_fraction_bits);
+
+ static const uint_type last_significant_bit =
+ (num_throwaway_bits < 0)
+ ? 0
+ : negatable_left_shift(num_throwaway_bits, 1u);
+ static const uint_type first_rounded_bit =
+ (num_throwaway_bits < 1)
+ ? 0
+ : negatable_left_shift(num_throwaway_bits - 1, 1u);
+
+ static const uint_type throwaway_mask_bits =
+ num_throwaway_bits > 0 ? num_throwaway_bits : 0;
+ static const uint_type throwaway_mask =
+ spvutils::SetBits<uint_type, 0, throwaway_mask_bits>::get;
+
+ *carry_bit = false;
+ other_uint_type out_val = 0;
+ uint_type significand = getNormalizedSignificand();
+ // If we are up-casting, then we just have to shift to the right location.
+ if (num_throwaway_bits <= 0) {
+ out_val = static_cast<other_uint_type>(significand);
+ uint_type shift_amount = static_cast<uint_type>(-num_throwaway_bits);
+ out_val = static_cast<other_uint_type>(out_val << shift_amount);
+ return out_val;
+ }
+
+ // If every non-representable bit is 0, then we don't have any casting to
+ // do.
+ if ((significand & throwaway_mask) == 0) {
+ return static_cast<other_uint_type>(
+ negatable_right_shift(num_throwaway_bits, significand));
+ }
+
+ bool round_away_from_zero = false;
+ // We actually have to narrow the significand here, so we have to follow the
+ // rounding rules.
+ switch (dir) {
+ case kRoundToZero:
+ break;
+ case kRoundToPositiveInfinity:
+ round_away_from_zero = !isNegative();
+ break;
+ case kRoundToNegativeInfinity:
+ round_away_from_zero = isNegative();
+ break;
+ case kRoundToNearestEven:
+ // Have to round down, round bit is 0
+ if ((first_rounded_bit & significand) == 0) {
+ break;
+ }
+ if (((significand & throwaway_mask) & ~first_rounded_bit) != 0) {
+ // If any subsequent bit of the rounded portion is non-0 then we round
+ // up.
+ round_away_from_zero = true;
+ break;
+ }
+ // We are exactly half-way between 2 numbers, pick even.
+ if ((significand & last_significant_bit) != 0) {
+ // 1 for our last bit, round up.
+ round_away_from_zero = true;
+ break;
+ }
+ break;
+ }
+
+ if (round_away_from_zero) {
+ return static_cast<other_uint_type>(
+ negatable_right_shift(num_throwaway_bits, incrementSignificand(
+ significand, last_significant_bit, carry_bit)));
+ } else {
+ return static_cast<other_uint_type>(
+ negatable_right_shift(num_throwaway_bits, significand));
+ }
+ }
+
+ // Casts this value to another HexFloat. If the cast is widening,
+ // then round_dir is ignored. If the cast is narrowing, then
+ // the result is rounded in the direction specified.
+ // This number will retain Nan and Inf values.
+ // It will also saturate to Inf if the number overflows, and
+ // underflow to (0 or min depending on rounding) if the number underflows.
+ template <typename other_T>
+ void castTo(other_T& other, round_direction round_dir) {
+ other = other_T(static_cast<typename other_T::native_type>(0));
+ bool negate = isNegative();
+ if (getUnsignedBits() == 0) {
+ if (negate) {
+ other.set_value(-other.value());
+ }
+ return;
+ }
+ uint_type significand = getSignificandBits();
+ bool carried = false;
+ typename other_T::uint_type rounded_significand =
+ getRoundedNormalizedSignificand<other_T>(round_dir, &carried);
+
+ int_type exponent = getUnbiasedExponent();
+ if (exponent == min_exponent) {
+ // If we are denormal, normalize the exponent, so that we can encode
+ // easily.
+ exponent = static_cast<int_type>(exponent + 1);
+ for (uint_type check_bit = first_exponent_bit >> 1; check_bit != 0;
+ check_bit = static_cast<uint_type>(check_bit >> 1)) {
+ exponent = static_cast<int_type>(exponent - 1);
+ if (check_bit & significand) break;
+ }
+ }
+
+ bool is_nan =
+ (getBits() & exponent_mask) == exponent_mask && significand != 0;
+ bool is_inf =
+ !is_nan &&
+ ((exponent + carried) > static_cast<int_type>(other_T::exponent_bias) ||
+ (significand == 0 && (getBits() & exponent_mask) == exponent_mask));
+
+ // If we are Nan or Inf we should pass that through.
+ if (is_inf) {
+ other.set_value(BitwiseCast<typename other_T::underlying_type>(
+ static_cast<typename other_T::uint_type>(
+ (negate ? other_T::sign_mask : 0) | other_T::exponent_mask)));
+ return;
+ }
+ if (is_nan) {
+ typename other_T::uint_type shifted_significand;
+ shifted_significand = static_cast<typename other_T::uint_type>(
+ negatable_left_shift(
+ static_cast<int_type>(other_T::num_fraction_bits) -
+ static_cast<int_type>(num_fraction_bits), significand));
+
+ // We are some sort of Nan. We try to keep the bit-pattern of the Nan
+ // as close as possible. If we had to shift off bits so we are 0, then we
+ // just set the last bit.
+ other.set_value(BitwiseCast<typename other_T::underlying_type>(
+ static_cast<typename other_T::uint_type>(
+ (negate ? other_T::sign_mask : 0) | other_T::exponent_mask |
+ (shifted_significand == 0 ? 0x1 : shifted_significand))));
+ return;
+ }
+
+ bool round_underflow_up =
+ isNegative() ? round_dir == kRoundToNegativeInfinity
+ : round_dir == kRoundToPositiveInfinity;
+ typedef typename other_T::int_type other_int_type;
+ // setFromSignUnbiasedExponentAndNormalizedSignificand will
+ // zero out any underflowing value (but retain the sign).
+ other.setFromSignUnbiasedExponentAndNormalizedSignificand(
+ negate, static_cast<other_int_type>(exponent), rounded_significand,
+ round_underflow_up);
+ return;
+ }
+
+ private:
+ T value_;
+
+ static_assert(num_used_bits ==
+ Traits::num_exponent_bits + Traits::num_fraction_bits + 1,
+ "The number of bits do not fit");
+ static_assert(sizeof(T) == sizeof(uint_type), "The type sizes do not match");
+};
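+
+// A minimal narrowing sketch (illustrative): 1.5f fits exactly in half
+// precision, so casting down with any rounding mode preserves it.
+//
+//   HexFloat<FloatProxy<float>> wide(1.5f);
+//   HexFloat<FloatProxy<Float16>> narrow(Float16(0));
+//   wide.castTo(narrow, kRoundToZero);
+//   // narrow now holds the half bit pattern 0x3E00 (== 1.5)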
+
+// Returns 4 bits represented by the hex character.
+inline uint8_t get_nibble_from_character(int character) {
+ const char* dec = "0123456789";
+ const char* lower = "abcdef";
+ const char* upper = "ABCDEF";
+ const char* p = nullptr;
+ if ((p = strchr(dec, character))) {
+ return static_cast<uint8_t>(p - dec);
+ } else if ((p = strchr(lower, character))) {
+ return static_cast<uint8_t>(p - lower + 0xa);
+ } else if ((p = strchr(upper, character))) {
+ return static_cast<uint8_t>(p - upper + 0xa);
+ }
+
+ assert(false && "This was called with a non-hex character");
+ return 0;
+}
+
+// Outputs the given HexFloat to the stream.
+template <typename T, typename Traits>
+std::ostream& operator<<(std::ostream& os, const HexFloat<T, Traits>& value) {
+ typedef HexFloat<T, Traits> HF;
+ typedef typename HF::uint_type uint_type;
+ typedef typename HF::int_type int_type;
+
+ static_assert(HF::num_used_bits != 0,
+ "num_used_bits must be non-zero for a valid float");
+ static_assert(HF::num_exponent_bits != 0,
+ "num_exponent_bits must be non-zero for a valid float");
+ static_assert(HF::num_fraction_bits != 0,
+ "num_fractin_bits must be non-zero for a valid float");
+
+ const uint_type bits = spvutils::BitwiseCast<uint_type>(value.value());
+ const char* const sign = (bits & HF::sign_mask) ? "-" : "";
+ const uint_type exponent = static_cast<uint_type>(
+ (bits & HF::exponent_mask) >> HF::num_fraction_bits);
+
+ uint_type fraction = static_cast<uint_type>((bits & HF::fraction_encode_mask)
+ << HF::num_overflow_bits);
+
+ const bool is_zero = exponent == 0 && fraction == 0;
+ const bool is_denorm = exponent == 0 && !is_zero;
+
+ // 'exponent' holds the biased exponent; we have to convert it back into
+ // the normal range.
+ int_type int_exponent = static_cast<int_type>(exponent - HF::exponent_bias);
+ // If the number is all zeros, then we actually have to NOT shift the
+ // exponent.
+ int_exponent = is_zero ? 0 : int_exponent;
+
+ // If we are denorm, then start shifting, and decreasing the exponent until
+ // our leading bit is 1.
+
+ if (is_denorm) {
+ while ((fraction & HF::fraction_top_bit) == 0) {
+ fraction = static_cast<uint_type>(fraction << 1);
+ int_exponent = static_cast<int_type>(int_exponent - 1);
+ }
+ // Since this is denormalized, we have to consume the leading 1 since it
+ // will end up being implicit.
+ fraction = static_cast<uint_type>(fraction << 1); // eat the leading 1
+ fraction &= HF::fraction_represent_mask;
+ }
+
+ uint_type fraction_nibbles = HF::fraction_nibbles;
+ // We do not have to display any trailing 0s, since this represents the
+ // fractional part.
+ while (fraction_nibbles > 0 && (fraction & 0xF) == 0) {
+ // Shift off any trailing values.
+ fraction = static_cast<uint_type>(fraction >> 4);
+ --fraction_nibbles;
+ }
+
+ const auto saved_flags = os.flags();
+ const auto saved_fill = os.fill();
+
+ os << sign << "0x" << (is_zero ? '0' : '1');
+ if (fraction_nibbles) {
+ // Make sure to keep the leading 0s in place, since this is the fractional
+ // part.
+ os << "." << std::setw(static_cast<int>(fraction_nibbles))
+ << std::setfill('0') << std::hex << fraction;
+ }
+ os << "p" << std::dec << (int_exponent >= 0 ? "+" : "") << int_exponent;
+
+ os.flags(saved_flags);
+ os.fill(saved_fill);
+
+ return os;
+}
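+
+// Example output (illustrative): the printer always emits a leading
+// "0x1" (or "0x0" for zero) and strips trailing zero nibbles, so
+//
+//   HexFloat<FloatProxy<float>>(1.0f)   prints as  0x1p+0
+//   HexFloat<FloatProxy<float>>(0.5f)   prints as  0x1p-1
+//   HexFloat<FloatProxy<float>>(-0.75f) prints as  -0x1.8p-1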
+
+// Returns true if negate_value is true and the next character on the
+// input stream is a plus or minus sign. In that case we also set the fail bit
+// on the stream and set the value to the zero value for its type.
+template <typename T, typename Traits>
+inline bool RejectParseDueToLeadingSign(std::istream& is, bool negate_value,
+ HexFloat<T, Traits>& value) {
+ if (negate_value) {
+ auto next_char = is.peek();
+ if (next_char == '-' || next_char == '+') {
+ // Fail the parse. Emulate standard behaviour by setting the value to
+ // the zero value, and set the fail bit on the stream.
+ value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0));
+ is.setstate(std::ios_base::failbit);
+ return true;
+ }
+ }
+ return false;
+}
+
+// Parses a floating point number from the given stream and stores it into the
+// value parameter.
+// If negate_value is true then the number may not have a leading minus or
+// plus, and if it successfully parses, then the number is negated before
+// being stored into the value parameter.
+// If the value cannot be correctly parsed or overflows the target floating
+// point type, then set the fail bit on the stream.
+// TODO(dneto): Promise C++11 standard behavior in how the value is set in
+// the error case, but only after all target platforms implement it correctly.
+// In particular, the Microsoft C++ runtime appears to be out of spec.
+template <typename T, typename Traits>
+inline std::istream& ParseNormalFloat(std::istream& is, bool negate_value,
+ HexFloat<T, Traits>& value) {
+ if (RejectParseDueToLeadingSign(is, negate_value, value)) {
+ return is;
+ }
+ T val;
+ is >> val;
+ if (negate_value) {
+ val = -val;
+ }
+ value.set_value(val);
+ // In the failure case, map -0.0 to 0.0.
+ if (is.fail() && value.getUnsignedBits() == 0u) {
+ value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0));
+ }
+ if (val.isInfinity()) {
+ // Fail the parse. Emulate standard behaviour by setting the value to
+ // the closest normal value, and set the fail bit on the stream.
+ value.set_value((value.isNegative() | negate_value) ? T::lowest()
+ : T::max());
+ is.setstate(std::ios_base::failbit);
+ }
+ return is;
+}
+
+// Specialization of ParseNormalFloat for FloatProxy<Float16> values.
+// This will parse the float as if it were a 32-bit floating point number,
+// and then round it down to fit into a Float16 value.
+// The number is rounded towards zero.
+// If negate_value is true then the number may not have a leading minus or
+// plus, and if it successfully parses, then the number is negated before
+// being stored into the value parameter.
+// If the value cannot be correctly parsed or overflows the target floating
+// point type, then set the fail bit on the stream.
+// TODO(dneto): Promise C++11 standard behavior in how the value is set in
+// the error case, but only after all target platforms implement it correctly.
+// In particular, the Microsoft C++ runtime appears to be out of spec.
+template <>
+inline std::istream&
+ParseNormalFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>(
+ std::istream& is, bool negate_value,
+ HexFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>& value) {
+ // First parse as a 32-bit float.
+ HexFloat<FloatProxy<float>> float_val(0.0f);
+ ParseNormalFloat(is, negate_value, float_val);
+
+ // Then convert to 16-bit float, saturating at infinities, and
+ // rounding toward zero.
+ float_val.castTo(value, kRoundToZero);
+
+ // Overflow on 16-bit behaves the same as for 32- and 64-bit: set the
+ // fail bit and set the lowest or highest value.
+ if (Float16::isInfinity(value.value().getAsFloat())) {
+ value.set_value(value.isNegative() ? Float16::lowest() : Float16::max());
+ is.setstate(std::ios_base::failbit);
+ }
+ return is;
+}
+
+// Reads a HexFloat from the given stream.
+// If the float is not encoded as a hex-float then it will be parsed
+// as a regular float.
+// This may fail if your stream does not support at least one unget.
+// NaN values can be encoded with "0x1.<not zero>p+(exponent_bias+1)".
+// This would normally overflow a float and round to
+// infinity, but this special pattern is the exact representation for a NaN,
+// and therefore is actually encoded as the correct NaN. To encode inf,
+// specify a zero fraction with an exponent of exponent_bias+1, or any
+// greater exponent (the fraction is then cleared).
+// Examples using IEEE 32-bit float encoding.
+// 0x1.0p+128 (+inf)
+// -0x1.0p+128 (-inf)
+//
+// 0x1.1p+128 (+NaN)
+// -0x1.1p+128 (-NaN)
+//
+// 0x1p+129 (+inf)
+// -0x1p+129 (-inf)
+template <typename T, typename Traits>
+std::istream& operator>>(std::istream& is, HexFloat<T, Traits>& value) {
+ using HF = HexFloat<T, Traits>;
+ using uint_type = typename HF::uint_type;
+ using int_type = typename HF::int_type;
+
+ value.set_value(static_cast<typename HF::native_type>(0.f));
+
+ if (is.flags() & std::ios::skipws) {
+ // If the user wants to skip whitespace, then we should obey that.
+ while (std::isspace(is.peek())) {
+ is.get();
+ }
+ }
+
+ auto next_char = is.peek();
+ bool negate_value = false;
+
+ if (next_char != '-' && next_char != '0') {
+ return ParseNormalFloat(is, negate_value, value);
+ }
+
+ if (next_char == '-') {
+ negate_value = true;
+ is.get();
+ next_char = is.peek();
+ }
+
+ if (next_char == '0') {
+ is.get(); // We may have to unget this.
+ auto maybe_hex_start = is.peek();
+ if (maybe_hex_start != 'x' && maybe_hex_start != 'X') {
+ is.unget();
+ return ParseNormalFloat(is, negate_value, value);
+ } else {
+ is.get(); // Throw away the 'x'.
+ }
+ } else {
+ return ParseNormalFloat(is, negate_value, value);
+ }
+
+ // This "looks" like a hex-float so treat it as one.
+ bool seen_p = false;
+ bool seen_dot = false;
+ uint_type fraction_index = 0;
+
+ uint_type fraction = 0;
+ int_type exponent = HF::exponent_bias;
+
+ // Strip off leading zeros so we don't have to special-case them later.
+ while ((next_char = is.peek()) == '0') {
+ is.get();
+ }
+
+ bool is_denorm =
+ true; // Assume denorm "representation" until we hear otherwise.
+ // NB: This does not mean the value is actually denorm;
+ // it just means that the number was written with a zero integer part.
+ bool bits_written = false; // Stays false until we write a bit.
+ while (!seen_p && !seen_dot) {
+ // Handle characters that are left of the fractional part.
+ if (next_char == '.') {
+ seen_dot = true;
+ } else if (next_char == 'p') {
+ seen_p = true;
+ } else if (::isxdigit(next_char)) {
+ // We know this is not denormalized since we have stripped all leading
+ // zeroes and we are not a ".".
+ is_denorm = false;
+ int number = get_nibble_from_character(next_char);
+ for (int i = 0; i < 4; ++i, number <<= 1) {
+ uint_type write_bit = (number & 0x8) ? 0x1 : 0x0;
+ if (bits_written) {
+ // If we are here the bits represented belong in the fractional
+ // part of the float, and we have to adjust the exponent accordingly.
+ fraction = static_cast<uint_type>(
+ fraction |
+ static_cast<uint_type>(
+ write_bit << (HF::top_bit_left_shift - fraction_index++)));
+ exponent = static_cast<int_type>(exponent + 1);
+ }
+ bits_written |= write_bit != 0;
+ }
+ } else {
+ // We have not found our exponent yet, so we have to fail.
+ is.setstate(std::ios::failbit);
+ return is;
+ }
+ is.get();
+ next_char = is.peek();
+ }
+ bits_written = false;
+ while (seen_dot && !seen_p) {
+ // Handle only fractional parts now.
+ if (next_char == 'p') {
+ seen_p = true;
+ } else if (::isxdigit(next_char)) {
+ int number = get_nibble_from_character(next_char);
+ for (int i = 0; i < 4; ++i, number <<= 1) {
+ uint_type write_bit = (number & 0x8) ? 0x01 : 0x00;
+ bits_written |= write_bit != 0;
+ if (is_denorm && !bits_written) {
+ // Handle modifying the exponent here; this way we can handle
+ // an arbitrary number of hex digits without overflowing our
+ // integer.
+ exponent = static_cast<int_type>(exponent - 1);
+ } else {
+ fraction = static_cast<uint_type>(
+ fraction |
+ static_cast<uint_type>(
+ write_bit << (HF::top_bit_left_shift - fraction_index++)));
+ }
+ }
+ } else {
+ // We still have not found our 'p' exponent yet, so this is not a valid
+ // hex-float.
+ is.setstate(std::ios::failbit);
+ return is;
+ }
+ is.get();
+ next_char = is.peek();
+ }
+
+ bool seen_sign = false;
+ int8_t exponent_sign = 1;
+ int_type written_exponent = 0;
+ while (true) {
+ if ((next_char == '-' || next_char == '+')) {
+ if (seen_sign) {
+ is.setstate(std::ios::failbit);
+ return is;
+ }
+ seen_sign = true;
+ exponent_sign = (next_char == '-') ? -1 : 1;
+ } else if (::isdigit(next_char)) {
+ // Hex-floats express their exponent as decimal.
+ written_exponent = static_cast<int_type>(written_exponent * 10);
+ written_exponent =
+ static_cast<int_type>(written_exponent + (next_char - '0'));
+ } else {
+ break;
+ }
+ is.get();
+ next_char = is.peek();
+ }
+
+ written_exponent = static_cast<int_type>(written_exponent * exponent_sign);
+ exponent = static_cast<int_type>(exponent + written_exponent);
+
+ bool is_zero = is_denorm && (fraction == 0);
+ if (is_denorm && !is_zero) {
+ fraction = static_cast<uint_type>(fraction << 1);
+ exponent = static_cast<int_type>(exponent - 1);
+ } else if (is_zero) {
+ exponent = 0;
+ }
+
+ if (exponent <= 0 && !is_zero) {
+ fraction = static_cast<uint_type>(fraction >> 1);
+ fraction |= static_cast<uint_type>(1) << HF::top_bit_left_shift;
+ }
+
+ fraction = (fraction >> HF::fraction_right_shift) & HF::fraction_encode_mask;
+
+ const int_type max_exponent =
+ SetBits<uint_type, 0, HF::num_exponent_bits>::get;
+
+ // Handle actual denorm numbers
+ while (exponent < 0 && !is_zero) {
+ fraction = static_cast<uint_type>(fraction >> 1);
+ exponent = static_cast<int_type>(exponent + 1);
+
+ fraction &= HF::fraction_encode_mask;
+ if (fraction == 0) {
+ // We have underflowed our fraction. We should clamp to zero.
+ is_zero = true;
+ exponent = 0;
+ }
+ }
+
+ // We have overflowed, so we should be inf/-inf.
+ if (exponent > max_exponent) {
+ exponent = max_exponent;
+ fraction = 0;
+ }
+
+ uint_type output_bits = static_cast<uint_type>(
+ static_cast<uint_type>(negate_value ? 1 : 0) << HF::top_bit_left_shift);
+ output_bits |= fraction;
+
+ uint_type shifted_exponent = static_cast<uint_type>(
+ static_cast<uint_type>(exponent << HF::exponent_left_shift) &
+ HF::exponent_mask);
+ output_bits |= shifted_exponent;
+
+ T output_float = spvutils::BitwiseCast<T>(output_bits);
+ value.set_value(output_float);
+
+ return is;
+}
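+
+// A small parsing sketch (illustrative): "0x1.8p+1" is 1.5 * 2^1, so
+// reading it into a 32-bit hex float produces the bits of 3.0f:
+//
+//   std::istringstream in("0x1.8p+1");
+//   HexFloat<FloatProxy<float>> hf(0.0f);
+//   in >> hf;    // hf.value().getAsFloat() == 3.0f (bits 0x40400000)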
+
+// Writes a FloatProxy value to a stream.
+// Zero and normal numbers are printed in the usual notation, but with
+// enough digits to fully reproduce the value. Other values (subnormal,
+// NaN, and infinity) are printed as a hex float.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const FloatProxy<T>& value) {
+ auto float_val = value.getAsFloat();
+ switch (std::fpclassify(float_val)) {
+ case FP_ZERO:
+ case FP_NORMAL: {
+ auto saved_precision = os.precision();
+ // max_digits10 (not digits10) is needed to round-trip the value exactly.
+ os.precision(std::numeric_limits<T>::max_digits10);
+ os << float_val;
+ os.precision(saved_precision);
+ } break;
+ default:
+ os << HexFloat<FloatProxy<T>>(value);
+ break;
+ }
+ return os;
+}
+
+template <>
+inline std::ostream& operator<<<Float16>(std::ostream& os,
+ const FloatProxy<Float16>& value) {
+ os << HexFloat<FloatProxy<Float16>>(value);
+ return os;
+}
+}
+
+#endif // LIBSPIRV_UTIL_HEX_FLOAT_H_
diff --git a/src/3rdparty/glslang/SPIRV/spirv.hpp b/src/3rdparty/glslang/SPIRV/spirv.hpp
new file mode 100644
index 0000000..62c2dc9
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/spirv.hpp
@@ -0,0 +1,1343 @@
+// Copyright (c) 2014-2019 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+//
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+//
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+// IN THE MATERIALS.
+
+// This header is automatically generated by the same tool that creates
+// the Binary Section of the SPIR-V specification.
+
+// Enumeration tokens for SPIR-V, in various styles:
+// C, C++, C++11, JSON, Lua, Python, C#, D
+//
+// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+// - C# will use enum classes in the Specification class located in the "Spv" namespace,
+// e.g.: Spv.Specification.SourceLanguage.GLSL
+// - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL
+//
+// Some tokens act like mask values, which can be OR'd together,
+// while others are mutually exclusive. The mask-like ones have
+// "Mask" in their name, and a parallel enum that has the shift
+// amount (1 << x) for each corresponding enumerant.
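+//
+// For example (illustrative, not part of the generated header),
+// ImageOperandsLodMask == (1 << ImageOperandsLodShift), and mask values
+// combine with the operator| overloads defined at the end of this header:
+//
+//   spv::ImageOperandsMask ops =
+//       spv::ImageOperandsBiasMask | spv::ImageOperandsLodMask;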
+
+#ifndef spirv_HPP
+#define spirv_HPP
+
+namespace spv {
+
+typedef unsigned int Id;
+
+#define SPV_VERSION 0x10300
+#define SPV_REVISION 7
+
+static const unsigned int MagicNumber = 0x07230203;
+static const unsigned int Version = 0x00010300;
+static const unsigned int Revision = 7;
+static const unsigned int OpCodeMask = 0xffff;
+static const unsigned int WordCountShift = 16;
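+
+// Illustrative decode of an instruction's first word (not part of the
+// generated header):
+//
+//   unsigned opCode    = firstWord & spv::OpCodeMask;
+//   unsigned wordCount = firstWord >> spv::WordCountShift;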
+
+enum SourceLanguage {
+ SourceLanguageUnknown = 0,
+ SourceLanguageESSL = 1,
+ SourceLanguageGLSL = 2,
+ SourceLanguageOpenCL_C = 3,
+ SourceLanguageOpenCL_CPP = 4,
+ SourceLanguageHLSL = 5,
+ SourceLanguageMax = 0x7fffffff,
+};
+
+enum ExecutionModel {
+ ExecutionModelVertex = 0,
+ ExecutionModelTessellationControl = 1,
+ ExecutionModelTessellationEvaluation = 2,
+ ExecutionModelGeometry = 3,
+ ExecutionModelFragment = 4,
+ ExecutionModelGLCompute = 5,
+ ExecutionModelKernel = 6,
+ ExecutionModelTaskNV = 5267,
+ ExecutionModelMeshNV = 5268,
+ ExecutionModelRayGenerationNV = 5313,
+ ExecutionModelIntersectionNV = 5314,
+ ExecutionModelAnyHitNV = 5315,
+ ExecutionModelClosestHitNV = 5316,
+ ExecutionModelMissNV = 5317,
+ ExecutionModelCallableNV = 5318,
+ ExecutionModelMax = 0x7fffffff,
+};
+
+enum AddressingModel {
+ AddressingModelLogical = 0,
+ AddressingModelPhysical32 = 1,
+ AddressingModelPhysical64 = 2,
+ AddressingModelPhysicalStorageBuffer64EXT = 5348,
+ AddressingModelMax = 0x7fffffff,
+};
+
+enum MemoryModel {
+ MemoryModelSimple = 0,
+ MemoryModelGLSL450 = 1,
+ MemoryModelOpenCL = 2,
+ MemoryModelVulkanKHR = 3,
+ MemoryModelMax = 0x7fffffff,
+};
+
+enum ExecutionMode {
+ ExecutionModeInvocations = 0,
+ ExecutionModeSpacingEqual = 1,
+ ExecutionModeSpacingFractionalEven = 2,
+ ExecutionModeSpacingFractionalOdd = 3,
+ ExecutionModeVertexOrderCw = 4,
+ ExecutionModeVertexOrderCcw = 5,
+ ExecutionModePixelCenterInteger = 6,
+ ExecutionModeOriginUpperLeft = 7,
+ ExecutionModeOriginLowerLeft = 8,
+ ExecutionModeEarlyFragmentTests = 9,
+ ExecutionModePointMode = 10,
+ ExecutionModeXfb = 11,
+ ExecutionModeDepthReplacing = 12,
+ ExecutionModeDepthGreater = 14,
+ ExecutionModeDepthLess = 15,
+ ExecutionModeDepthUnchanged = 16,
+ ExecutionModeLocalSize = 17,
+ ExecutionModeLocalSizeHint = 18,
+ ExecutionModeInputPoints = 19,
+ ExecutionModeInputLines = 20,
+ ExecutionModeInputLinesAdjacency = 21,
+ ExecutionModeTriangles = 22,
+ ExecutionModeInputTrianglesAdjacency = 23,
+ ExecutionModeQuads = 24,
+ ExecutionModeIsolines = 25,
+ ExecutionModeOutputVertices = 26,
+ ExecutionModeOutputPoints = 27,
+ ExecutionModeOutputLineStrip = 28,
+ ExecutionModeOutputTriangleStrip = 29,
+ ExecutionModeVecTypeHint = 30,
+ ExecutionModeContractionOff = 31,
+ ExecutionModeInitializer = 33,
+ ExecutionModeFinalizer = 34,
+ ExecutionModeSubgroupSize = 35,
+ ExecutionModeSubgroupsPerWorkgroup = 36,
+ ExecutionModeSubgroupsPerWorkgroupId = 37,
+ ExecutionModeLocalSizeId = 38,
+ ExecutionModeLocalSizeHintId = 39,
+ ExecutionModePostDepthCoverage = 4446,
+ ExecutionModeDenormPreserve = 4459,
+ ExecutionModeDenormFlushToZero = 4460,
+ ExecutionModeSignedZeroInfNanPreserve = 4461,
+ ExecutionModeRoundingModeRTE = 4462,
+ ExecutionModeRoundingModeRTZ = 4463,
+ ExecutionModeStencilRefReplacingEXT = 5027,
+ ExecutionModeOutputLinesNV = 5269,
+ ExecutionModeOutputPrimitivesNV = 5270,
+ ExecutionModeDerivativeGroupQuadsNV = 5289,
+ ExecutionModeDerivativeGroupLinearNV = 5290,
+ ExecutionModeOutputTrianglesNV = 5298,
+ ExecutionModeMax = 0x7fffffff,
+};
+
+enum StorageClass {
+ StorageClassUniformConstant = 0,
+ StorageClassInput = 1,
+ StorageClassUniform = 2,
+ StorageClassOutput = 3,
+ StorageClassWorkgroup = 4,
+ StorageClassCrossWorkgroup = 5,
+ StorageClassPrivate = 6,
+ StorageClassFunction = 7,
+ StorageClassGeneric = 8,
+ StorageClassPushConstant = 9,
+ StorageClassAtomicCounter = 10,
+ StorageClassImage = 11,
+ StorageClassStorageBuffer = 12,
+ StorageClassCallableDataNV = 5328,
+ StorageClassIncomingCallableDataNV = 5329,
+ StorageClassRayPayloadNV = 5338,
+ StorageClassHitAttributeNV = 5339,
+ StorageClassIncomingRayPayloadNV = 5342,
+ StorageClassShaderRecordBufferNV = 5343,
+ StorageClassPhysicalStorageBufferEXT = 5349,
+ StorageClassMax = 0x7fffffff,
+};
+
+enum Dim {
+ Dim1D = 0,
+ Dim2D = 1,
+ Dim3D = 2,
+ DimCube = 3,
+ DimRect = 4,
+ DimBuffer = 5,
+ DimSubpassData = 6,
+ DimMax = 0x7fffffff,
+};
+
+enum SamplerAddressingMode {
+ SamplerAddressingModeNone = 0,
+ SamplerAddressingModeClampToEdge = 1,
+ SamplerAddressingModeClamp = 2,
+ SamplerAddressingModeRepeat = 3,
+ SamplerAddressingModeRepeatMirrored = 4,
+ SamplerAddressingModeMax = 0x7fffffff,
+};
+
+enum SamplerFilterMode {
+ SamplerFilterModeNearest = 0,
+ SamplerFilterModeLinear = 1,
+ SamplerFilterModeMax = 0x7fffffff,
+};
+
+enum ImageFormat {
+ ImageFormatUnknown = 0,
+ ImageFormatRgba32f = 1,
+ ImageFormatRgba16f = 2,
+ ImageFormatR32f = 3,
+ ImageFormatRgba8 = 4,
+ ImageFormatRgba8Snorm = 5,
+ ImageFormatRg32f = 6,
+ ImageFormatRg16f = 7,
+ ImageFormatR11fG11fB10f = 8,
+ ImageFormatR16f = 9,
+ ImageFormatRgba16 = 10,
+ ImageFormatRgb10A2 = 11,
+ ImageFormatRg16 = 12,
+ ImageFormatRg8 = 13,
+ ImageFormatR16 = 14,
+ ImageFormatR8 = 15,
+ ImageFormatRgba16Snorm = 16,
+ ImageFormatRg16Snorm = 17,
+ ImageFormatRg8Snorm = 18,
+ ImageFormatR16Snorm = 19,
+ ImageFormatR8Snorm = 20,
+ ImageFormatRgba32i = 21,
+ ImageFormatRgba16i = 22,
+ ImageFormatRgba8i = 23,
+ ImageFormatR32i = 24,
+ ImageFormatRg32i = 25,
+ ImageFormatRg16i = 26,
+ ImageFormatRg8i = 27,
+ ImageFormatR16i = 28,
+ ImageFormatR8i = 29,
+ ImageFormatRgba32ui = 30,
+ ImageFormatRgba16ui = 31,
+ ImageFormatRgba8ui = 32,
+ ImageFormatR32ui = 33,
+ ImageFormatRgb10a2ui = 34,
+ ImageFormatRg32ui = 35,
+ ImageFormatRg16ui = 36,
+ ImageFormatRg8ui = 37,
+ ImageFormatR16ui = 38,
+ ImageFormatR8ui = 39,
+ ImageFormatMax = 0x7fffffff,
+};
+
+enum ImageChannelOrder {
+ ImageChannelOrderR = 0,
+ ImageChannelOrderA = 1,
+ ImageChannelOrderRG = 2,
+ ImageChannelOrderRA = 3,
+ ImageChannelOrderRGB = 4,
+ ImageChannelOrderRGBA = 5,
+ ImageChannelOrderBGRA = 6,
+ ImageChannelOrderARGB = 7,
+ ImageChannelOrderIntensity = 8,
+ ImageChannelOrderLuminance = 9,
+ ImageChannelOrderRx = 10,
+ ImageChannelOrderRGx = 11,
+ ImageChannelOrderRGBx = 12,
+ ImageChannelOrderDepth = 13,
+ ImageChannelOrderDepthStencil = 14,
+ ImageChannelOrdersRGB = 15,
+ ImageChannelOrdersRGBx = 16,
+ ImageChannelOrdersRGBA = 17,
+ ImageChannelOrdersBGRA = 18,
+ ImageChannelOrderABGR = 19,
+ ImageChannelOrderMax = 0x7fffffff,
+};
+
+enum ImageChannelDataType {
+ ImageChannelDataTypeSnormInt8 = 0,
+ ImageChannelDataTypeSnormInt16 = 1,
+ ImageChannelDataTypeUnormInt8 = 2,
+ ImageChannelDataTypeUnormInt16 = 3,
+ ImageChannelDataTypeUnormShort565 = 4,
+ ImageChannelDataTypeUnormShort555 = 5,
+ ImageChannelDataTypeUnormInt101010 = 6,
+ ImageChannelDataTypeSignedInt8 = 7,
+ ImageChannelDataTypeSignedInt16 = 8,
+ ImageChannelDataTypeSignedInt32 = 9,
+ ImageChannelDataTypeUnsignedInt8 = 10,
+ ImageChannelDataTypeUnsignedInt16 = 11,
+ ImageChannelDataTypeUnsignedInt32 = 12,
+ ImageChannelDataTypeHalfFloat = 13,
+ ImageChannelDataTypeFloat = 14,
+ ImageChannelDataTypeUnormInt24 = 15,
+ ImageChannelDataTypeUnormInt101010_2 = 16,
+ ImageChannelDataTypeMax = 0x7fffffff,
+};
+
+enum ImageOperandsShift {
+ ImageOperandsBiasShift = 0,
+ ImageOperandsLodShift = 1,
+ ImageOperandsGradShift = 2,
+ ImageOperandsConstOffsetShift = 3,
+ ImageOperandsOffsetShift = 4,
+ ImageOperandsConstOffsetsShift = 5,
+ ImageOperandsSampleShift = 6,
+ ImageOperandsMinLodShift = 7,
+ ImageOperandsMakeTexelAvailableKHRShift = 8,
+ ImageOperandsMakeTexelVisibleKHRShift = 9,
+ ImageOperandsNonPrivateTexelKHRShift = 10,
+ ImageOperandsVolatileTexelKHRShift = 11,
+ ImageOperandsMax = 0x7fffffff,
+};
+
+enum ImageOperandsMask {
+ ImageOperandsMaskNone = 0,
+ ImageOperandsBiasMask = 0x00000001,
+ ImageOperandsLodMask = 0x00000002,
+ ImageOperandsGradMask = 0x00000004,
+ ImageOperandsConstOffsetMask = 0x00000008,
+ ImageOperandsOffsetMask = 0x00000010,
+ ImageOperandsConstOffsetsMask = 0x00000020,
+ ImageOperandsSampleMask = 0x00000040,
+ ImageOperandsMinLodMask = 0x00000080,
+ ImageOperandsMakeTexelAvailableKHRMask = 0x00000100,
+ ImageOperandsMakeTexelVisibleKHRMask = 0x00000200,
+ ImageOperandsNonPrivateTexelKHRMask = 0x00000400,
+ ImageOperandsVolatileTexelKHRMask = 0x00000800,
+};
+
+enum FPFastMathModeShift {
+ FPFastMathModeNotNaNShift = 0,
+ FPFastMathModeNotInfShift = 1,
+ FPFastMathModeNSZShift = 2,
+ FPFastMathModeAllowRecipShift = 3,
+ FPFastMathModeFastShift = 4,
+ FPFastMathModeMax = 0x7fffffff,
+};
+
+enum FPFastMathModeMask {
+ FPFastMathModeMaskNone = 0,
+ FPFastMathModeNotNaNMask = 0x00000001,
+ FPFastMathModeNotInfMask = 0x00000002,
+ FPFastMathModeNSZMask = 0x00000004,
+ FPFastMathModeAllowRecipMask = 0x00000008,
+ FPFastMathModeFastMask = 0x00000010,
+};
+
+enum FPRoundingMode {
+ FPRoundingModeRTE = 0,
+ FPRoundingModeRTZ = 1,
+ FPRoundingModeRTP = 2,
+ FPRoundingModeRTN = 3,
+ FPRoundingModeMax = 0x7fffffff,
+};
+
+enum LinkageType {
+ LinkageTypeExport = 0,
+ LinkageTypeImport = 1,
+ LinkageTypeMax = 0x7fffffff,
+};
+
+enum AccessQualifier {
+ AccessQualifierReadOnly = 0,
+ AccessQualifierWriteOnly = 1,
+ AccessQualifierReadWrite = 2,
+ AccessQualifierMax = 0x7fffffff,
+};
+
+enum FunctionParameterAttribute {
+ FunctionParameterAttributeZext = 0,
+ FunctionParameterAttributeSext = 1,
+ FunctionParameterAttributeByVal = 2,
+ FunctionParameterAttributeSret = 3,
+ FunctionParameterAttributeNoAlias = 4,
+ FunctionParameterAttributeNoCapture = 5,
+ FunctionParameterAttributeNoWrite = 6,
+ FunctionParameterAttributeNoReadWrite = 7,
+ FunctionParameterAttributeMax = 0x7fffffff,
+};
+
+enum Decoration {
+ DecorationRelaxedPrecision = 0,
+ DecorationSpecId = 1,
+ DecorationBlock = 2,
+ DecorationBufferBlock = 3,
+ DecorationRowMajor = 4,
+ DecorationColMajor = 5,
+ DecorationArrayStride = 6,
+ DecorationMatrixStride = 7,
+ DecorationGLSLShared = 8,
+ DecorationGLSLPacked = 9,
+ DecorationCPacked = 10,
+ DecorationBuiltIn = 11,
+ DecorationNoPerspective = 13,
+ DecorationFlat = 14,
+ DecorationPatch = 15,
+ DecorationCentroid = 16,
+ DecorationSample = 17,
+ DecorationInvariant = 18,
+ DecorationRestrict = 19,
+ DecorationAliased = 20,
+ DecorationVolatile = 21,
+ DecorationConstant = 22,
+ DecorationCoherent = 23,
+ DecorationNonWritable = 24,
+ DecorationNonReadable = 25,
+ DecorationUniform = 26,
+ DecorationSaturatedConversion = 28,
+ DecorationStream = 29,
+ DecorationLocation = 30,
+ DecorationComponent = 31,
+ DecorationIndex = 32,
+ DecorationBinding = 33,
+ DecorationDescriptorSet = 34,
+ DecorationOffset = 35,
+ DecorationXfbBuffer = 36,
+ DecorationXfbStride = 37,
+ DecorationFuncParamAttr = 38,
+ DecorationFPRoundingMode = 39,
+ DecorationFPFastMathMode = 40,
+ DecorationLinkageAttributes = 41,
+ DecorationNoContraction = 42,
+ DecorationInputAttachmentIndex = 43,
+ DecorationAlignment = 44,
+ DecorationMaxByteOffset = 45,
+ DecorationAlignmentId = 46,
+ DecorationMaxByteOffsetId = 47,
+ DecorationNoSignedWrap = 4469,
+ DecorationNoUnsignedWrap = 4470,
+ DecorationExplicitInterpAMD = 4999,
+ DecorationOverrideCoverageNV = 5248,
+ DecorationPassthroughNV = 5250,
+ DecorationViewportRelativeNV = 5252,
+ DecorationSecondaryViewportRelativeNV = 5256,
+ DecorationPerPrimitiveNV = 5271,
+ DecorationPerViewNV = 5272,
+ DecorationPerTaskNV = 5273,
+ DecorationPerVertexNV = 5285,
+ DecorationNonUniformEXT = 5300,
+ DecorationRestrictPointerEXT = 5355,
+ DecorationAliasedPointerEXT = 5356,
+ DecorationHlslCounterBufferGOOGLE = 5634,
+ DecorationHlslSemanticGOOGLE = 5635,
+ DecorationMax = 0x7fffffff,
+};
+
+enum BuiltIn {
+ BuiltInPosition = 0,
+ BuiltInPointSize = 1,
+ BuiltInClipDistance = 3,
+ BuiltInCullDistance = 4,
+ BuiltInVertexId = 5,
+ BuiltInInstanceId = 6,
+ BuiltInPrimitiveId = 7,
+ BuiltInInvocationId = 8,
+ BuiltInLayer = 9,
+ BuiltInViewportIndex = 10,
+ BuiltInTessLevelOuter = 11,
+ BuiltInTessLevelInner = 12,
+ BuiltInTessCoord = 13,
+ BuiltInPatchVertices = 14,
+ BuiltInFragCoord = 15,
+ BuiltInPointCoord = 16,
+ BuiltInFrontFacing = 17,
+ BuiltInSampleId = 18,
+ BuiltInSamplePosition = 19,
+ BuiltInSampleMask = 20,
+ BuiltInFragDepth = 22,
+ BuiltInHelperInvocation = 23,
+ BuiltInNumWorkgroups = 24,
+ BuiltInWorkgroupSize = 25,
+ BuiltInWorkgroupId = 26,
+ BuiltInLocalInvocationId = 27,
+ BuiltInGlobalInvocationId = 28,
+ BuiltInLocalInvocationIndex = 29,
+ BuiltInWorkDim = 30,
+ BuiltInGlobalSize = 31,
+ BuiltInEnqueuedWorkgroupSize = 32,
+ BuiltInGlobalOffset = 33,
+ BuiltInGlobalLinearId = 34,
+ BuiltInSubgroupSize = 36,
+ BuiltInSubgroupMaxSize = 37,
+ BuiltInNumSubgroups = 38,
+ BuiltInNumEnqueuedSubgroups = 39,
+ BuiltInSubgroupId = 40,
+ BuiltInSubgroupLocalInvocationId = 41,
+ BuiltInVertexIndex = 42,
+ BuiltInInstanceIndex = 43,
+ BuiltInSubgroupEqMask = 4416,
+ BuiltInSubgroupEqMaskKHR = 4416,
+ BuiltInSubgroupGeMask = 4417,
+ BuiltInSubgroupGeMaskKHR = 4417,
+ BuiltInSubgroupGtMask = 4418,
+ BuiltInSubgroupGtMaskKHR = 4418,
+ BuiltInSubgroupLeMask = 4419,
+ BuiltInSubgroupLeMaskKHR = 4419,
+ BuiltInSubgroupLtMask = 4420,
+ BuiltInSubgroupLtMaskKHR = 4420,
+ BuiltInBaseVertex = 4424,
+ BuiltInBaseInstance = 4425,
+ BuiltInDrawIndex = 4426,
+ BuiltInDeviceIndex = 4438,
+ BuiltInViewIndex = 4440,
+ BuiltInBaryCoordNoPerspAMD = 4992,
+ BuiltInBaryCoordNoPerspCentroidAMD = 4993,
+ BuiltInBaryCoordNoPerspSampleAMD = 4994,
+ BuiltInBaryCoordSmoothAMD = 4995,
+ BuiltInBaryCoordSmoothCentroidAMD = 4996,
+ BuiltInBaryCoordSmoothSampleAMD = 4997,
+ BuiltInBaryCoordPullModelAMD = 4998,
+ BuiltInFragStencilRefEXT = 5014,
+ BuiltInViewportMaskNV = 5253,
+ BuiltInSecondaryPositionNV = 5257,
+ BuiltInSecondaryViewportMaskNV = 5258,
+ BuiltInPositionPerViewNV = 5261,
+ BuiltInViewportMaskPerViewNV = 5262,
+ BuiltInFullyCoveredEXT = 5264,
+ BuiltInTaskCountNV = 5274,
+ BuiltInPrimitiveCountNV = 5275,
+ BuiltInPrimitiveIndicesNV = 5276,
+ BuiltInClipDistancePerViewNV = 5277,
+ BuiltInCullDistancePerViewNV = 5278,
+ BuiltInLayerPerViewNV = 5279,
+ BuiltInMeshViewCountNV = 5280,
+ BuiltInMeshViewIndicesNV = 5281,
+ BuiltInBaryCoordNV = 5286,
+ BuiltInBaryCoordNoPerspNV = 5287,
+ BuiltInFragSizeEXT = 5292,
+ BuiltInFragmentSizeNV = 5292,
+ BuiltInFragInvocationCountEXT = 5293,
+ BuiltInInvocationsPerPixelNV = 5293,
+ BuiltInLaunchIdNV = 5319,
+ BuiltInLaunchSizeNV = 5320,
+ BuiltInWorldRayOriginNV = 5321,
+ BuiltInWorldRayDirectionNV = 5322,
+ BuiltInObjectRayOriginNV = 5323,
+ BuiltInObjectRayDirectionNV = 5324,
+ BuiltInRayTminNV = 5325,
+ BuiltInRayTmaxNV = 5326,
+ BuiltInInstanceCustomIndexNV = 5327,
+ BuiltInObjectToWorldNV = 5330,
+ BuiltInWorldToObjectNV = 5331,
+ BuiltInHitTNV = 5332,
+ BuiltInHitKindNV = 5333,
+ BuiltInIncomingRayFlagsNV = 5351,
+ BuiltInMax = 0x7fffffff,
+};
+
+enum SelectionControlShift {
+ SelectionControlFlattenShift = 0,
+ SelectionControlDontFlattenShift = 1,
+ SelectionControlMax = 0x7fffffff,
+};
+
+enum SelectionControlMask {
+ SelectionControlMaskNone = 0,
+ SelectionControlFlattenMask = 0x00000001,
+ SelectionControlDontFlattenMask = 0x00000002,
+};
+
+enum LoopControlShift {
+ LoopControlUnrollShift = 0,
+ LoopControlDontUnrollShift = 1,
+ LoopControlDependencyInfiniteShift = 2,
+ LoopControlDependencyLengthShift = 3,
+ LoopControlMax = 0x7fffffff,
+};
+
+enum LoopControlMask {
+ LoopControlMaskNone = 0,
+ LoopControlUnrollMask = 0x00000001,
+ LoopControlDontUnrollMask = 0x00000002,
+ LoopControlDependencyInfiniteMask = 0x00000004,
+ LoopControlDependencyLengthMask = 0x00000008,
+};
+
+enum FunctionControlShift {
+ FunctionControlInlineShift = 0,
+ FunctionControlDontInlineShift = 1,
+ FunctionControlPureShift = 2,
+ FunctionControlConstShift = 3,
+ FunctionControlMax = 0x7fffffff,
+};
+
+enum FunctionControlMask {
+ FunctionControlMaskNone = 0,
+ FunctionControlInlineMask = 0x00000001,
+ FunctionControlDontInlineMask = 0x00000002,
+ FunctionControlPureMask = 0x00000004,
+ FunctionControlConstMask = 0x00000008,
+};
+
+enum MemorySemanticsShift {
+ MemorySemanticsAcquireShift = 1,
+ MemorySemanticsReleaseShift = 2,
+ MemorySemanticsAcquireReleaseShift = 3,
+ MemorySemanticsSequentiallyConsistentShift = 4,
+ MemorySemanticsUniformMemoryShift = 6,
+ MemorySemanticsSubgroupMemoryShift = 7,
+ MemorySemanticsWorkgroupMemoryShift = 8,
+ MemorySemanticsCrossWorkgroupMemoryShift = 9,
+ MemorySemanticsAtomicCounterMemoryShift = 10,
+ MemorySemanticsImageMemoryShift = 11,
+ MemorySemanticsOutputMemoryKHRShift = 12,
+ MemorySemanticsMakeAvailableKHRShift = 13,
+ MemorySemanticsMakeVisibleKHRShift = 14,
+ MemorySemanticsMax = 0x7fffffff,
+};
+
+enum MemorySemanticsMask {
+ MemorySemanticsMaskNone = 0,
+ MemorySemanticsAcquireMask = 0x00000002,
+ MemorySemanticsReleaseMask = 0x00000004,
+ MemorySemanticsAcquireReleaseMask = 0x00000008,
+ MemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ MemorySemanticsUniformMemoryMask = 0x00000040,
+ MemorySemanticsSubgroupMemoryMask = 0x00000080,
+ MemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ MemorySemanticsImageMemoryMask = 0x00000800,
+ MemorySemanticsOutputMemoryKHRMask = 0x00001000,
+ MemorySemanticsMakeAvailableKHRMask = 0x00002000,
+ MemorySemanticsMakeVisibleKHRMask = 0x00004000,
+};
+
+enum MemoryAccessShift {
+ MemoryAccessVolatileShift = 0,
+ MemoryAccessAlignedShift = 1,
+ MemoryAccessNontemporalShift = 2,
+ MemoryAccessMakePointerAvailableKHRShift = 3,
+ MemoryAccessMakePointerVisibleKHRShift = 4,
+ MemoryAccessNonPrivatePointerKHRShift = 5,
+ MemoryAccessMax = 0x7fffffff,
+};
+
+enum MemoryAccessMask {
+ MemoryAccessMaskNone = 0,
+ MemoryAccessVolatileMask = 0x00000001,
+ MemoryAccessAlignedMask = 0x00000002,
+ MemoryAccessNontemporalMask = 0x00000004,
+ MemoryAccessMakePointerAvailableKHRMask = 0x00000008,
+ MemoryAccessMakePointerVisibleKHRMask = 0x00000010,
+ MemoryAccessNonPrivatePointerKHRMask = 0x00000020,
+};
+
+enum Scope {
+ ScopeCrossDevice = 0,
+ ScopeDevice = 1,
+ ScopeWorkgroup = 2,
+ ScopeSubgroup = 3,
+ ScopeInvocation = 4,
+ ScopeQueueFamilyKHR = 5,
+ ScopeMax = 0x7fffffff,
+};
+
+enum GroupOperation {
+ GroupOperationReduce = 0,
+ GroupOperationInclusiveScan = 1,
+ GroupOperationExclusiveScan = 2,
+ GroupOperationClusteredReduce = 3,
+ GroupOperationPartitionedReduceNV = 6,
+ GroupOperationPartitionedInclusiveScanNV = 7,
+ GroupOperationPartitionedExclusiveScanNV = 8,
+ GroupOperationMax = 0x7fffffff,
+};
+
+enum KernelEnqueueFlags {
+ KernelEnqueueFlagsNoWait = 0,
+ KernelEnqueueFlagsWaitKernel = 1,
+ KernelEnqueueFlagsWaitWorkGroup = 2,
+ KernelEnqueueFlagsMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoShift {
+ KernelProfilingInfoCmdExecTimeShift = 0,
+ KernelProfilingInfoMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoMask {
+ KernelProfilingInfoMaskNone = 0,
+ KernelProfilingInfoCmdExecTimeMask = 0x00000001,
+};
+
+enum Capability {
+ CapabilityMatrix = 0,
+ CapabilityShader = 1,
+ CapabilityGeometry = 2,
+ CapabilityTessellation = 3,
+ CapabilityAddresses = 4,
+ CapabilityLinkage = 5,
+ CapabilityKernel = 6,
+ CapabilityVector16 = 7,
+ CapabilityFloat16Buffer = 8,
+ CapabilityFloat16 = 9,
+ CapabilityFloat64 = 10,
+ CapabilityInt64 = 11,
+ CapabilityInt64Atomics = 12,
+ CapabilityImageBasic = 13,
+ CapabilityImageReadWrite = 14,
+ CapabilityImageMipmap = 15,
+ CapabilityPipes = 17,
+ CapabilityGroups = 18,
+ CapabilityDeviceEnqueue = 19,
+ CapabilityLiteralSampler = 20,
+ CapabilityAtomicStorage = 21,
+ CapabilityInt16 = 22,
+ CapabilityTessellationPointSize = 23,
+ CapabilityGeometryPointSize = 24,
+ CapabilityImageGatherExtended = 25,
+ CapabilityStorageImageMultisample = 27,
+ CapabilityUniformBufferArrayDynamicIndexing = 28,
+ CapabilitySampledImageArrayDynamicIndexing = 29,
+ CapabilityStorageBufferArrayDynamicIndexing = 30,
+ CapabilityStorageImageArrayDynamicIndexing = 31,
+ CapabilityClipDistance = 32,
+ CapabilityCullDistance = 33,
+ CapabilityImageCubeArray = 34,
+ CapabilitySampleRateShading = 35,
+ CapabilityImageRect = 36,
+ CapabilitySampledRect = 37,
+ CapabilityGenericPointer = 38,
+ CapabilityInt8 = 39,
+ CapabilityInputAttachment = 40,
+ CapabilitySparseResidency = 41,
+ CapabilityMinLod = 42,
+ CapabilitySampled1D = 43,
+ CapabilityImage1D = 44,
+ CapabilitySampledCubeArray = 45,
+ CapabilitySampledBuffer = 46,
+ CapabilityImageBuffer = 47,
+ CapabilityImageMSArray = 48,
+ CapabilityStorageImageExtendedFormats = 49,
+ CapabilityImageQuery = 50,
+ CapabilityDerivativeControl = 51,
+ CapabilityInterpolationFunction = 52,
+ CapabilityTransformFeedback = 53,
+ CapabilityGeometryStreams = 54,
+ CapabilityStorageImageReadWithoutFormat = 55,
+ CapabilityStorageImageWriteWithoutFormat = 56,
+ CapabilityMultiViewport = 57,
+ CapabilitySubgroupDispatch = 58,
+ CapabilityNamedBarrier = 59,
+ CapabilityPipeStorage = 60,
+ CapabilityGroupNonUniform = 61,
+ CapabilityGroupNonUniformVote = 62,
+ CapabilityGroupNonUniformArithmetic = 63,
+ CapabilityGroupNonUniformBallot = 64,
+ CapabilityGroupNonUniformShuffle = 65,
+ CapabilityGroupNonUniformShuffleRelative = 66,
+ CapabilityGroupNonUniformClustered = 67,
+ CapabilityGroupNonUniformQuad = 68,
+ CapabilitySubgroupBallotKHR = 4423,
+ CapabilityDrawParameters = 4427,
+ CapabilitySubgroupVoteKHR = 4431,
+ CapabilityStorageBuffer16BitAccess = 4433,
+ CapabilityStorageUniformBufferBlock16 = 4433,
+ CapabilityStorageUniform16 = 4434,
+ CapabilityUniformAndStorageBuffer16BitAccess = 4434,
+ CapabilityStoragePushConstant16 = 4435,
+ CapabilityStorageInputOutput16 = 4436,
+ CapabilityDeviceGroup = 4437,
+ CapabilityMultiView = 4439,
+ CapabilityVariablePointersStorageBuffer = 4441,
+ CapabilityVariablePointers = 4442,
+ CapabilityAtomicStorageOps = 4445,
+ CapabilitySampleMaskPostDepthCoverage = 4447,
+ CapabilityStorageBuffer8BitAccess = 4448,
+ CapabilityUniformAndStorageBuffer8BitAccess = 4449,
+ CapabilityStoragePushConstant8 = 4450,
+ CapabilityDenormPreserve = 4464,
+ CapabilityDenormFlushToZero = 4465,
+ CapabilitySignedZeroInfNanPreserve = 4466,
+ CapabilityRoundingModeRTE = 4467,
+ CapabilityRoundingModeRTZ = 4468,
+ CapabilityFloat16ImageAMD = 5008,
+ CapabilityImageGatherBiasLodAMD = 5009,
+ CapabilityFragmentMaskAMD = 5010,
+ CapabilityStencilExportEXT = 5013,
+ CapabilityImageReadWriteLodAMD = 5015,
+ CapabilitySampleMaskOverrideCoverageNV = 5249,
+ CapabilityGeometryShaderPassthroughNV = 5251,
+ CapabilityShaderViewportIndexLayerEXT = 5254,
+ CapabilityShaderViewportIndexLayerNV = 5254,
+ CapabilityShaderViewportMaskNV = 5255,
+ CapabilityShaderStereoViewNV = 5259,
+ CapabilityPerViewAttributesNV = 5260,
+ CapabilityFragmentFullyCoveredEXT = 5265,
+ CapabilityMeshShadingNV = 5266,
+ CapabilityImageFootprintNV = 5282,
+ CapabilityFragmentBarycentricNV = 5284,
+ CapabilityComputeDerivativeGroupQuadsNV = 5288,
+ CapabilityFragmentDensityEXT = 5291,
+ CapabilityShadingRateNV = 5291,
+ CapabilityGroupNonUniformPartitionedNV = 5297,
+ CapabilityShaderNonUniformEXT = 5301,
+ CapabilityRuntimeDescriptorArrayEXT = 5302,
+ CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303,
+ CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304,
+ CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305,
+ CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306,
+ CapabilitySampledImageArrayNonUniformIndexingEXT = 5307,
+ CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308,
+ CapabilityStorageImageArrayNonUniformIndexingEXT = 5309,
+ CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,
+ CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,
+ CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,
+ CapabilityRayTracingNV = 5340,
+ CapabilityVulkanMemoryModelKHR = 5345,
+ CapabilityVulkanMemoryModelDeviceScopeKHR = 5346,
+ CapabilityPhysicalStorageBufferAddressesEXT = 5347,
+ CapabilityComputeDerivativeGroupLinearNV = 5350,
+ CapabilityCooperativeMatrixNV = 5357,
+ CapabilitySubgroupShuffleINTEL = 5568,
+ CapabilitySubgroupBufferBlockIOINTEL = 5569,
+ CapabilitySubgroupImageBlockIOINTEL = 5570,
+ CapabilitySubgroupImageMediaBlockIOINTEL = 5579,
+ CapabilitySubgroupAvcMotionEstimationINTEL = 5696,
+ CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697,
+ CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698,
+ CapabilityMax = 0x7fffffff,
+};
+
+enum Op {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpExecutionModeId = 331,
+ OpDecorateId = 332,
+ OpGroupNonUniformElect = 333,
+ OpGroupNonUniformAll = 334,
+ OpGroupNonUniformAny = 335,
+ OpGroupNonUniformAllEqual = 336,
+ OpGroupNonUniformBroadcast = 337,
+ OpGroupNonUniformBroadcastFirst = 338,
+ OpGroupNonUniformBallot = 339,
+ OpGroupNonUniformInverseBallot = 340,
+ OpGroupNonUniformBallotBitExtract = 341,
+ OpGroupNonUniformBallotBitCount = 342,
+ OpGroupNonUniformBallotFindLSB = 343,
+ OpGroupNonUniformBallotFindMSB = 344,
+ OpGroupNonUniformShuffle = 345,
+ OpGroupNonUniformShuffleXor = 346,
+ OpGroupNonUniformShuffleUp = 347,
+ OpGroupNonUniformShuffleDown = 348,
+ OpGroupNonUniformIAdd = 349,
+ OpGroupNonUniformFAdd = 350,
+ OpGroupNonUniformIMul = 351,
+ OpGroupNonUniformFMul = 352,
+ OpGroupNonUniformSMin = 353,
+ OpGroupNonUniformUMin = 354,
+ OpGroupNonUniformFMin = 355,
+ OpGroupNonUniformSMax = 356,
+ OpGroupNonUniformUMax = 357,
+ OpGroupNonUniformFMax = 358,
+ OpGroupNonUniformBitwiseAnd = 359,
+ OpGroupNonUniformBitwiseOr = 360,
+ OpGroupNonUniformBitwiseXor = 361,
+ OpGroupNonUniformLogicalAnd = 362,
+ OpGroupNonUniformLogicalOr = 363,
+ OpGroupNonUniformLogicalXor = 364,
+ OpGroupNonUniformQuadBroadcast = 365,
+ OpGroupNonUniformQuadSwap = 366,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpSubgroupReadInvocationKHR = 4432,
+ OpGroupIAddNonUniformAMD = 5000,
+ OpGroupFAddNonUniformAMD = 5001,
+ OpGroupFMinNonUniformAMD = 5002,
+ OpGroupUMinNonUniformAMD = 5003,
+ OpGroupSMinNonUniformAMD = 5004,
+ OpGroupFMaxNonUniformAMD = 5005,
+ OpGroupUMaxNonUniformAMD = 5006,
+ OpGroupSMaxNonUniformAMD = 5007,
+ OpFragmentMaskFetchAMD = 5011,
+ OpFragmentFetchAMD = 5012,
+ OpImageSampleFootprintNV = 5283,
+ OpGroupNonUniformPartitionNV = 5296,
+ OpWritePackedPrimitiveIndices4x8NV = 5299,
+ OpReportIntersectionNV = 5334,
+ OpIgnoreIntersectionNV = 5335,
+ OpTerminateRayNV = 5336,
+ OpTraceNV = 5337,
+ OpTypeAccelerationStructureNV = 5341,
+ OpExecuteCallableNV = 5344,
+ OpTypeCooperativeMatrixNV = 5358,
+ OpCooperativeMatrixLoadNV = 5359,
+ OpCooperativeMatrixStoreNV = 5360,
+ OpCooperativeMatrixMulAddNV = 5361,
+ OpCooperativeMatrixLengthNV = 5362,
+ OpSubgroupShuffleINTEL = 5571,
+ OpSubgroupShuffleDownINTEL = 5572,
+ OpSubgroupShuffleUpINTEL = 5573,
+ OpSubgroupShuffleXorINTEL = 5574,
+ OpSubgroupBlockReadINTEL = 5575,
+ OpSubgroupBlockWriteINTEL = 5576,
+ OpSubgroupImageBlockReadINTEL = 5577,
+ OpSubgroupImageBlockWriteINTEL = 5578,
+ OpSubgroupImageMediaBlockReadINTEL = 5580,
+ OpSubgroupImageMediaBlockWriteINTEL = 5581,
+ OpDecorateStringGOOGLE = 5632,
+ OpMemberDecorateStringGOOGLE = 5633,
+ OpVmeImageINTEL = 5699,
+ OpTypeVmeImageINTEL = 5700,
+ OpTypeAvcImePayloadINTEL = 5701,
+ OpTypeAvcRefPayloadINTEL = 5702,
+ OpTypeAvcSicPayloadINTEL = 5703,
+ OpTypeAvcMcePayloadINTEL = 5704,
+ OpTypeAvcMceResultINTEL = 5705,
+ OpTypeAvcImeResultINTEL = 5706,
+ OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707,
+ OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708,
+ OpTypeAvcImeSingleReferenceStreaminINTEL = 5709,
+ OpTypeAvcImeDualReferenceStreaminINTEL = 5710,
+ OpTypeAvcRefResultINTEL = 5711,
+ OpTypeAvcSicResultINTEL = 5712,
+ OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713,
+ OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714,
+ OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715,
+ OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716,
+ OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717,
+ OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718,
+ OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719,
+ OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720,
+ OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721,
+ OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722,
+ OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723,
+ OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724,
+ OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725,
+ OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726,
+ OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727,
+ OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728,
+ OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729,
+ OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730,
+ OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731,
+ OpSubgroupAvcMceConvertToImePayloadINTEL = 5732,
+ OpSubgroupAvcMceConvertToImeResultINTEL = 5733,
+ OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734,
+ OpSubgroupAvcMceConvertToRefResultINTEL = 5735,
+ OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736,
+ OpSubgroupAvcMceConvertToSicResultINTEL = 5737,
+ OpSubgroupAvcMceGetMotionVectorsINTEL = 5738,
+ OpSubgroupAvcMceGetInterDistortionsINTEL = 5739,
+ OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740,
+ OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741,
+ OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742,
+ OpSubgroupAvcMceGetInterDirectionsINTEL = 5743,
+ OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744,
+ OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745,
+ OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746,
+ OpSubgroupAvcImeInitializeINTEL = 5747,
+ OpSubgroupAvcImeSetSingleReferenceINTEL = 5748,
+ OpSubgroupAvcImeSetDualReferenceINTEL = 5749,
+ OpSubgroupAvcImeRefWindowSizeINTEL = 5750,
+ OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751,
+ OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752,
+ OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753,
+ OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754,
+ OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755,
+ OpSubgroupAvcImeSetWeightedSadINTEL = 5756,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757,
+ OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764,
+ OpSubgroupAvcImeConvertToMceResultINTEL = 5765,
+ OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766,
+ OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767,
+ OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768,
+ OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775,
+ OpSubgroupAvcImeGetBorderReachedINTEL = 5776,
+ OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777,
+ OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778,
+ OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779,
+ OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780,
+ OpSubgroupAvcFmeInitializeINTEL = 5781,
+ OpSubgroupAvcBmeInitializeINTEL = 5782,
+ OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783,
+ OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784,
+ OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785,
+ OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786,
+ OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787,
+ OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788,
+ OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789,
+ OpSubgroupAvcRefConvertToMceResultINTEL = 5790,
+ OpSubgroupAvcSicInitializeINTEL = 5791,
+ OpSubgroupAvcSicConfigureSkcINTEL = 5792,
+ OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793,
+ OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794,
+ OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795,
+ OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796,
+ OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797,
+ OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798,
+ OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799,
+ OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800,
+ OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801,
+ OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802,
+ OpSubgroupAvcSicEvaluateIpeINTEL = 5803,
+ OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804,
+ OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805,
+ OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806,
+ OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807,
+ OpSubgroupAvcSicConvertToMceResultINTEL = 5808,
+ OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809,
+ OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810,
+ OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811,
+ OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812,
+ OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813,
+ OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814,
+ OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815,
+ OpSubgroupAvcSicGetInterRawSadsINTEL = 5816,
+ OpMax = 0x7fffffff,
+};
+
+// Overload operator| for mask bit combining
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
+
+} // end namespace spv
+
+#endif // #ifndef spirv_HPP
+
diff --git a/src/3rdparty/glslang/SPIRV/spvIR.h b/src/3rdparty/glslang/SPIRV/spvIR.h
new file mode 100644
index 0000000..b3cd0b0
--- /dev/null
+++ b/src/3rdparty/glslang/SPIRV/spvIR.h
@@ -0,0 +1,441 @@
+//
+// Copyright (C) 2014 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// SPIR-V IR
+//
+// Simple in-memory representation (IR) of SPIR-V, just for holding
+// each function's CFG of blocks. Has this hierarchy:
+// - Module, which is a list of
+// - Function, which is a list of
+// - Block, which is a list of
+// - Instruction
+//
+
+#pragma once
+#ifndef spvIR_H
+#define spvIR_H
+
+#include "spirv.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <vector>
+
+namespace spv {
+
+class Block;
+class Function;
+class Module;
+
+const Id NoResult = 0;
+const Id NoType = 0;
+
+const Decoration NoPrecision = DecorationMax;
+
+#ifdef __GNUC__
+# define POTENTIALLY_UNUSED __attribute__((unused))
+#else
+# define POTENTIALLY_UNUSED
+#endif
+
+POTENTIALLY_UNUSED
+const MemorySemanticsMask MemorySemanticsAllMemory =
+ (MemorySemanticsMask)(MemorySemanticsUniformMemoryMask |
+ MemorySemanticsWorkgroupMemoryMask |
+ MemorySemanticsAtomicCounterMemoryMask |
+ MemorySemanticsImageMemoryMask);
+
+struct IdImmediate {
+ bool isId; // true if word is an Id, false if word is an immediate
+ unsigned word;
+ IdImmediate(bool i, unsigned w) : isId(i), word(w) {}
+};
+
+//
+// SPIR-V IR instruction.
+//
+
+class Instruction {
+public:
+ Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
+ explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
+ virtual ~Instruction() {}
+ void addIdOperand(Id id) {
+ operands.push_back(id);
+ idOperand.push_back(true);
+ }
+ void addImmediateOperand(unsigned int immediate) {
+ operands.push_back(immediate);
+ idOperand.push_back(false);
+ }
+ void setImmediateOperand(unsigned idx, unsigned int immediate) {
+ assert(!idOperand[idx]);
+ operands[idx] = immediate;
+ }
+
+ void addStringOperand(const char* str)
+ {
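+        // Pack the string into 32-bit words, four chars per word, including
+        // the terminating 0; a trailing partial word is padded with 0s
+        // (SPIR-V's literal-string encoding).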
+ unsigned int word;
+ char* wordString = (char*)&word;
+ char* wordPtr = wordString;
+ int charCount = 0;
+ char c;
+ do {
+ c = *(str++);
+ *(wordPtr++) = c;
+ ++charCount;
+ if (charCount == 4) {
+ addImmediateOperand(word);
+ wordPtr = wordString;
+ charCount = 0;
+ }
+ } while (c != 0);
+
+ // deal with partial last word
+ if (charCount > 0) {
+ // pad with 0s
+ for (; charCount < 4; ++charCount)
+ *(wordPtr++) = 0;
+ addImmediateOperand(word);
+ }
+ }
+ bool isIdOperand(int op) const { return idOperand[op]; }
+ void setBlock(Block* b) { block = b; }
+ Block* getBlock() const { return block; }
+ Op getOpCode() const { return opCode; }
+ int getNumOperands() const
+ {
+ assert(operands.size() == idOperand.size());
+ return (int)operands.size();
+ }
+ Id getResultId() const { return resultId; }
+ Id getTypeId() const { return typeId; }
+ Id getIdOperand(int op) const {
+ assert(idOperand[op]);
+ return operands[op];
+ }
+ unsigned int getImmediateOperand(int op) const {
+ assert(!idOperand[op]);
+ return operands[op];
+ }
+
+ // Write out the binary form.
+ void dump(std::vector<unsigned int>& out) const
+ {
+ // Compute the wordCount
+ unsigned int wordCount = 1;
+ if (typeId)
+ ++wordCount;
+ if (resultId)
+ ++wordCount;
+ wordCount += (unsigned int)operands.size();
+
+ // Write out the beginning of the instruction
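+        // (first word layout: word count in the high 16 bits, opcode in the
+        // low 16 bits)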
+ out.push_back(((wordCount) << WordCountShift) | opCode);
+ if (typeId)
+ out.push_back(typeId);
+ if (resultId)
+ out.push_back(resultId);
+
+ // Write out the operands
+ for (int op = 0; op < (int)operands.size(); ++op)
+ out.push_back(operands[op]);
+ }
+
+protected:
+ Instruction(const Instruction&);
+ Id resultId;
+ Id typeId;
+ Op opCode;
+ std::vector<Id> operands; // operands, both <id> and immediates (both are unsigned int)
+ std::vector<bool> idOperand; // true for operands that are <id>, false for immediates
+ Block* block;
+};
+
+//
+// SPIR-V IR block.
+//
+
+class Block {
+public:
+ Block(Id id, Function& parent);
+ virtual ~Block()
+ {
+ }
+
+ Id getId() { return instructions.front()->getResultId(); }
+
+ Function& getParent() const { return parent; }
+ void addInstruction(std::unique_ptr<Instruction> inst);
+ void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
+ void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
+ const std::vector<Block*>& getPredecessors() const { return predecessors; }
+ const std::vector<Block*>& getSuccessors() const { return successors; }
+ const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
+ return instructions;
+ }
+ const std::vector<std::unique_ptr<Instruction> >& getLocalVariables() const { return localVariables; }
+ void setUnreachable() { unreachable = true; }
+ bool isUnreachable() const { return unreachable; }
+ // Returns the block's merge instruction, if one exists (otherwise null).
+ const Instruction* getMergeInstruction() const {
+ if (instructions.size() < 2) return nullptr;
+ const Instruction* nextToLast = (instructions.cend() - 2)->get();
+ switch (nextToLast->getOpCode()) {
+ case OpSelectionMerge:
+ case OpLoopMerge:
+ return nextToLast;
+ default:
+ return nullptr;
+ }
+ return nullptr;
+ }
+
+ bool isTerminated() const
+ {
+ switch (instructions.back()->getOpCode()) {
+ case OpBranch:
+ case OpBranchConditional:
+ case OpSwitch:
+ case OpKill:
+ case OpReturn:
+ case OpReturnValue:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ void dump(std::vector<unsigned int>& out) const
+ {
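+        // Emit the leading OpLabel first, then the block's local variables,
+        // then the remaining instructions.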
+ instructions[0]->dump(out);
+ for (int i = 0; i < (int)localVariables.size(); ++i)
+ localVariables[i]->dump(out);
+ for (int i = 1; i < (int)instructions.size(); ++i)
+ instructions[i]->dump(out);
+ }
+
+protected:
+ Block(const Block&);
+ Block& operator=(Block&);
+
+ // To enforce keeping parent and ownership in sync:
+ friend Function;
+
+ std::vector<std::unique_ptr<Instruction> > instructions;
+ std::vector<Block*> predecessors, successors;
+ std::vector<std::unique_ptr<Instruction> > localVariables;
+ Function& parent;
+
+    // track whether this block is known to be unreachable (not necessarily
+ // true for all unreachable blocks, but should be set at least
+ // for the extraneous ones introduced by the builder).
+ bool unreachable;
+};
+
+// Traverses the control-flow graph rooted at root in an order suited for
+// readable code generation. Invokes callback at every node in the traversal
+// order.
+void inReadableOrder(Block* root, std::function<void(Block*)> callback);
+
+//
+// SPIR-V IR Function.
+//
+
+class Function {
+public:
+ Function(Id id, Id resultType, Id functionType, Id firstParam, Module& parent);
+ virtual ~Function()
+ {
+ for (int i = 0; i < (int)parameterInstructions.size(); ++i)
+ delete parameterInstructions[i];
+
+ for (int i = 0; i < (int)blocks.size(); ++i)
+ delete blocks[i];
+ }
+ Id getId() const { return functionInstruction.getResultId(); }
+ Id getParamId(int p) const { return parameterInstructions[p]->getResultId(); }
+ Id getParamType(int p) const { return parameterInstructions[p]->getTypeId(); }
+
+ void addBlock(Block* block) { blocks.push_back(block); }
+ void removeBlock(Block* block)
+ {
+ auto found = find(blocks.begin(), blocks.end(), block);
+ assert(found != blocks.end());
+ blocks.erase(found);
+ delete block;
+ }
+
+ Module& getParent() const { return parent; }
+ Block* getEntryBlock() const { return blocks.front(); }
+ Block* getLastBlock() const { return blocks.back(); }
+ const std::vector<Block*>& getBlocks() const { return blocks; }
+ void addLocalVariable(std::unique_ptr<Instruction> inst);
+ Id getReturnType() const { return functionInstruction.getTypeId(); }
+
+ void setImplicitThis() { implicitThis = true; }
+ bool hasImplicitThis() const { return implicitThis; }
+
+ void dump(std::vector<unsigned int>& out) const
+ {
+ // OpFunction
+ functionInstruction.dump(out);
+
+ // OpFunctionParameter
+ for (int p = 0; p < (int)parameterInstructions.size(); ++p)
+ parameterInstructions[p]->dump(out);
+
+ // Blocks
+ inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
+ Instruction end(0, 0, OpFunctionEnd);
+ end.dump(out);
+ }
+
+protected:
+ Function(const Function&);
+ Function& operator=(Function&);
+
+ Module& parent;
+ Instruction functionInstruction;
+ std::vector<Instruction*> parameterInstructions;
+ std::vector<Block*> blocks;
+ bool implicitThis; // true if this is a member function expecting to be passed a 'this' as the first argument
+};
+
+//
+// SPIR-V IR Module.
+//
+
+class Module {
+public:
+ Module() {}
+ virtual ~Module()
+ {
+ // TODO delete things
+ }
+
+ void addFunction(Function *fun) { functions.push_back(fun); }
+
+ void mapInstruction(Instruction *instruction)
+ {
+ spv::Id resultId = instruction->getResultId();
+ // map the instruction's result id
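+        // (resize with some slack so consecutive ids don't reallocate every
+        // time)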
+ if (resultId >= idToInstruction.size())
+ idToInstruction.resize(resultId + 16);
+ idToInstruction[resultId] = instruction;
+ }
+
+ Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
+ const std::vector<Function*>& getFunctions() const { return functions; }
+ spv::Id getTypeId(Id resultId) const {
+ return idToInstruction[resultId] == nullptr ? NoType : idToInstruction[resultId]->getTypeId();
+ }
+ StorageClass getStorageClass(Id typeId) const
+ {
+ assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer);
+ return (StorageClass)idToInstruction[typeId]->getImmediateOperand(0);
+ }
+
+ void dump(std::vector<unsigned int>& out) const
+ {
+ for (int f = 0; f < (int)functions.size(); ++f)
+ functions[f]->dump(out);
+ }
+
+protected:
+ Module(const Module&);
+ std::vector<Function*> functions;
+
+ // map from result id to instruction having that result id
+ std::vector<Instruction*> idToInstruction;
+
+};
+
+//
+// Implementation (it's here due to circular type definitions).
+//
+
+// Add both
+// - the OpFunction instruction
+// - all the OpFunctionParameter instructions
+__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent)
+ : parent(parent), functionInstruction(id, resultType, OpFunction), implicitThis(false)
+{
+ // OpFunction
+ functionInstruction.addImmediateOperand(FunctionControlMaskNone);
+ functionInstruction.addIdOperand(functionType);
+ parent.mapInstruction(&functionInstruction);
+ parent.addFunction(this);
+
+ // OpFunctionParameter
+ Instruction* typeInst = parent.getInstruction(functionType);
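+    // OpTypeFunction's first id operand is the return type; the remaining
+    // operands are the parameter types.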
+ int numParams = typeInst->getNumOperands() - 1;
+ for (int p = 0; p < numParams; ++p) {
+ Instruction* param = new Instruction(firstParamId + p, typeInst->getIdOperand(p + 1), OpFunctionParameter);
+ parent.mapInstruction(param);
+ parameterInstructions.push_back(param);
+ }
+}
+
+__inline void Function::addLocalVariable(std::unique_ptr<Instruction> inst)
+{
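+    // SPIR-V requires function-scope OpVariables to be the first
+    // instructions of the function's entry block.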
+ Instruction* raw_instruction = inst.get();
+ blocks[0]->addLocalVariable(std::move(inst));
+ parent.mapInstruction(raw_instruction);
+}
+
+__inline Block::Block(Id id, Function& parent) : parent(parent), unreachable(false)
+{
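+    // Every block begins with an OpLabel; its result id doubles as the
+    // block's id (see Block::getId).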
+ instructions.push_back(std::unique_ptr<Instruction>(new Instruction(id, NoType, OpLabel)));
+ instructions.back()->setBlock(this);
+ parent.getParent().mapInstruction(instructions.back().get());
+}
+
+__inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
+{
+ Instruction* raw_instruction = inst.get();
+ instructions.push_back(std::move(inst));
+ raw_instruction->setBlock(this);
+ if (raw_instruction->getResultId())
+ parent.getParent().mapInstruction(raw_instruction);
+}
+
+} // end spv namespace
+
+#endif // spvIR_H